diff --git a/.gitignore b/.gitignore index 89142ae34e9b..b684563a1c75 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ *.so # Folders +.cache _obj _test diff --git a/.travis.yml b/.travis.yml index 196b6b8e3a2b..1449496a3d78 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ sudo: required go: - 1.x -go_import_path: k8s.io/minikube install: - echo "Don't run anything." diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 639cf4a5fd19..000000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1079 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:81f8c061c3d18ed1710957910542bc17d2b789c6cd19e0f654c30b35fd255ca5" - name = "github.com/Azure/go-ansiterm" - packages = [ - ".", - "winterm", - ] - pruneopts = "NUT" - revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" - -[[projects]] - digest = "1:f314424048778dd9cd7bb5c7807a8396b0c6b2a133935f15ad350dcd36c27286" - name = "github.com/Parallels/docker-machine-parallels" - packages = ["."] - pruneopts = "NUT" - revision = "f23cb0b3d3de98df376bbfed9d97bc1f355df864" - version = "v1.3.0" - -[[projects]] - digest = "1:aba270497eb2d49f5cba6f4162d524b9a1195a24cbce8be20bf56a0051f47deb" - name = "github.com/blang/semver" - packages = ["."] - pruneopts = "NUT" - revision = "b38d23b8782a487059e8fc8773e9a5b228a77cb6" - version = "v3.5.0" - -[[projects]] - digest = "1:04873dc5e06932b750eac24a1d90eabae58249df63bed6823e77b62a0160a297" - name = "github.com/c4milo/gotoolkit" - packages = ["."] - pruneopts = "NUT" - revision = "bcc06269efa974c4f098619d9aae436846e83d84" - -[[projects]] - branch = "master" - digest = "1:cd7ba2b29e93e2a8384e813dfc80ebb0f85d9214762e6ca89bb55a58092eab87" - name = "github.com/cloudfoundry-attic/jibber_jabber" - packages = ["."] - pruneopts = "NUT" - revision = "bcc4c8345a21301bf47c032ff42dd1aae2fe3027" - -[[projects]] - digest = "1:cc832d4c674b57b5c67f683f75fba043dd3eec6fcd9b936f00cc8ddf439f2131" - name = "github.com/cpuguy83/go-md2man" - packages = ["md2man"] - pruneopts = "NUT" - revision = "71acacd42f85e5e82f70a55327789582a5200a90" - version = "v1.0.4" - -[[projects]] - digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "NUT" - revision = "782f4967f2dc4564575ca782fe2d04090b5faca8" - -[[projects]] - branch = "master" - digest = "1:f1beae74e4ad389f4a6c1eaeb9539c9a0c1b7b934aee3576147b988e09184f9c" - name = "github.com/docker/docker" - packages = [ - "pkg/term", - "pkg/term/windows", - ] - pruneopts = "NUT" - revision = "bbe08dc7f0b9498aa75f7956c0c342512a73a3d4" - -[[projects]] - digest = "1:9d8fe33c2e1aa01c76db67f285ffdb7dbdb5c5d90d9deb296526d67b9798ce7b" - name = "github.com/docker/go-units" - packages = ["."] - pruneopts = "NUT" - revision = "9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1" - -[[projects]] - branch = "master" - digest = "1:d4104968fb55cff5276444ffcf4693fa03deadc7cd98ed15005bf55e26a2ded1" - name = "github.com/docker/machine" - packages = [ - "commands/mcndirs", - "drivers/errdriver", - "drivers/hyperv", - "drivers/none", - "drivers/virtualbox", - "drivers/vmwarefusion", - "libmachine", - "libmachine/auth", - "libmachine/cert", - "libmachine/check", - "libmachine/drivers", - "libmachine/drivers/plugin", - "libmachine/drivers/plugin/localbinary", - "libmachine/drivers/rpc", - "libmachine/engine", - "libmachine/host", - "libmachine/log", - "libmachine/mcndockerclient", - "libmachine/mcnerror", - "libmachine/mcnflag", - 
"libmachine/mcnutils", - "libmachine/persist", - "libmachine/provision", - "libmachine/provision/pkgaction", - "libmachine/provision/serviceaction", - "libmachine/shell", - "libmachine/ssh", - "libmachine/state", - "libmachine/swarm", - "libmachine/version", - "libmachine/versioncmp", - "version", - ] - pruneopts = "NUT" - revision = "533ea58a3e7efb4b2b6cc24bb0b7b565e64d6b0e" - source = "github.com/machine-drivers/machine" - -[[projects]] - digest = "1:364ef519880c6024191ac7660244e5478b193489bb35755f843627cc86cfb411" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "NUT" - revision = "f12c6236fe7b5cf6bcf30e5935d08cb079d78334" - -[[projects]] - digest = "1:abfe129dc92b16fbf0cc9d6336096a2823151756f62072a700eb10754141b38e" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "NUT" - revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee" - -[[projects]] - digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "NUT" - revision = "c0656edd0d9eab7c66d1eb0c568f9039345796f7" - -[[projects]] - digest = "1:78b8040ece2ff622580def2708b9eb0b2857711b6744c475439bf337e9c677ea" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "NUT" - revision = "44145f04b68cf362d9c4df2182967c2275eaefed" - -[[projects]] - digest = "1:7672c206322f45b33fac1ae2cb899263533ce0adcc6481d207725560208ec84e" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "NUT" - revision = "02826c3e79038b59d737d3b1c0a1d937f71a4433" - -[[projects]] - digest = "1:0d390d7037c2aecc37e78c2cfbe43d020d6f1fa83fd22266b7ec621189447d57" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "NUT" - revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - -[[projects]] - branch = "master" - digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" - name = "github.com/google/btree" - packages = ["."] - pruneopts = "NUT" - revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" - -[[projects]] - digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0" - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/cmpopts", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value", - ] - pruneopts = "NUT" - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = "1:dfed0914a28dd3a8561fbfdd5c7a1deb2b90dee8edea6f58c9285680fc37b5c2" - name = "github.com/google/go-containerregistry" - packages = [ - "pkg/authn", - "pkg/name", - "pkg/v1", - "pkg/v1/partial", - "pkg/v1/remote", - "pkg/v1/remote/transport", - "pkg/v1/stream", - "pkg/v1/tarball", - "pkg/v1/types", - "pkg/v1/v1util", - ] - pruneopts = "NUT" - revision = "019cdfc6adf96a4905a1b93a7aeaea1e50c0b6cf" - -[[projects]] - digest = "1:63ede27834b468648817fb80cfb95d40abfc61341f89cb7a0d6779b6aa955425" - name = "github.com/google/go-github" - packages = ["github"] - pruneopts = "NUT" - revision = "a5cb647b1fac8ba77e1cf25af2f9526658ab63e3" - version = "v21.0.0" - -[[projects]] - digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690" - name = "github.com/google/go-querystring" - packages = ["query"] - pruneopts = "NUT" - revision = "44c6ddd0a2342c386950e880b658017258da92fc" - version = "v1.0.0" - -[[projects]] - digest = 
"1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "NUT" - revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" - -[[projects]] - digest = "1:27b4ab41ffdc76ad6db56db327a4db234a59588ef059fc3fd678ba0bc6b9094f" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "NUT" - revision = "0c5108395e2debce0d731cf0287ddf7242066aba" - -[[projects]] - branch = "master" - digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" - name = "github.com/gregjones/httpcache" - packages = [ - ".", - "diskcache", - ] - pruneopts = "NUT" - revision = "9cad4c3443a7200dd6400aef47183728de563a38" - -[[projects]] - digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19" - name = "github.com/hashicorp/errwrap" - packages = ["."] - pruneopts = "NUT" - revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" - -[[projects]] - digest = "1:03950bb9b6362247c000f4b4c24bd1e62e952cb40cd204971017e34260ede4e2" - name = "github.com/hashicorp/go-multierror" - packages = ["."] - pruneopts = "NUT" - revision = "8c5f0ad9360406a3807ce7de6bc73269a91a6e51" - -[[projects]] - digest = "1:c7d9de42b661ba85788f5f631cbac165795a2ff7dc1c59a4241d6228b129c3e4" - name = "github.com/hashicorp/go-version" - packages = ["."] - pruneopts = "NUT" - revision = "d40cf49b3a77bba84a7afdbd7f1dc295d114efb1" - version = "v1.1.0" - -[[projects]] - digest = "1:475b179287e8afdcd352014b2c2500e67decdf63e66125e2129286873453e1cd" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "NUT" - revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" - -[[projects]] - digest = "1:4eb7aa74693933be7f21dc465c49f9c0e03391262201ceac4dbf6a1be0bb7649" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token", - ] - pruneopts = "NUT" - revision = "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1" - -[[projects]] - digest = "1:0fc7a3b3aa025745e557f5286862c307c042a0445f0a99a3a0948a6964e12492" - name = "github.com/hooklift/iso9660" - packages = ["."] - pruneopts = "NUT" - revision = "1cf07e5970d810f027bfbdfa2e9ad86db479c53a" - -[[projects]] - digest = "1:8f5fa95e43ab4a43056b7b65ec1614b4440f33bbfef2fa0a4d4707aedcb1a023" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "NUT" - revision = "6633656539c1639d9d78127b7d47c622b5d7b6dc" - -[[projects]] - digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "NUT" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - branch = "master" - digest = "1:3526ea57285a47f99273a9b00252869d06888b08cd7358d6f89c35db6ec1605b" - name = "github.com/intel-go/cpuid" - packages = ["."] - pruneopts = "NUT" - revision = "1a4a6f06a1c643c8fbd339bd61d980960627d09e" - -[[projects]] - branch = "master" - digest = "1:29b8c51f736120581688521c42f4772cc42ab7942402ee38877be23bed001ede" - name = "github.com/jimmidyson/go-download" - packages = ["."] - pruneopts = "NUT" - revision = "7f9a90c8c95bee1bb7de9e72c682c67c8bf5546d" - -[[projects]] - branch = "master" - digest = "1:08e9d908b342ad3ec85cb188fd0c16c6e3e3fd4af23f1736f2eb2e174a6ec43c" - name = "github.com/johanneswuerbach/nfsexports" - packages = ["."] - pruneopts = "NUT" - revision = 
"1aa528dcb3451733ef505b9a268843831cb44b3f" - -[[projects]] - digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "NUT" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "v1.1.5" - -[[projects]] - digest = "1:47fa0ef6ae6b0387dacb77c7ab8827b4ef621650fd613e43ece61de4cb3f5019" - name = "github.com/kr/fs" - packages = ["."] - pruneopts = "NUT" - revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b" - -[[projects]] - digest = "1:7c99b7161783a39276d407232577a0510a8d6f8d0b6e80179049c0af060a3de1" - name = "github.com/libvirt/libvirt-go" - packages = ["."] - pruneopts = "NUT" - revision = "5ba8227244c30438609adcec6ea84dc1e688dfbd" - version = "v3.4.0" - -[[projects]] - digest = "1:c436d61d2f9af426241770422ef9f7a21edc7898963caa88c997c3f968770045" - name = "github.com/machine-drivers/docker-machine-driver-vmware" - packages = ["pkg/drivers/vmware/config"] - pruneopts = "NUT" - revision = "cd992887ede19ae63e030c63dda5593f19ed569c" - version = "v0.1.1" - -[[projects]] - digest = "1:1745a9a20b54531ca6d6db617f47ac6d367e5d004c7a44ba39149928bca8caff" - name = "github.com/magiconair/properties" - packages = ["."] - pruneopts = "NUT" - revision = "61b492c03cf472e0c6419be5899b8e0dc28b1b88" - -[[projects]] - digest = "1:bffa444ca07c69c599ae5876bc18b25bfd5fa85b297ca10a25594d284a7e9c5d" - name = "github.com/mattn/go-isatty" - packages = ["."] - pruneopts = "NUT" - revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" - version = "v0.0.4" - -[[projects]] - digest = "1:f2d263faf8641cd0a31bba2d034eb2f9ab9f3c3348b06ac9ad54b9a4ea692dee" - name = "github.com/mattn/go-runewidth" - packages = ["."] - pruneopts = "NUT" - revision = "737072b4e32b7a5018b4a7125da8d12de90e8045" - -[[projects]] - branch = "master" - digest = "1:2d209eb757bb4101584c2618becd4026593509343ce55491233fd090faa1f1da" - name = "github.com/mitchellh/go-ps" - packages = ["."] - pruneopts = "NUT" - revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" - -[[projects]] - digest = "1:c7fe3a5aad0ae944376d50572fb8249580562d4331b9a1c0a9da9610336688aa" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "NUT" - revision = "53818660ed4955e899c0bcafa97299a388bd7c8e" - -[[projects]] - digest = "1:d512df57c965a0b8539f58373cbf6653771cd545fcd4f3706374be0cbcee557d" - name = "github.com/moby/hyperkit" - packages = ["go"] - pruneopts = "NUT" - revision = "a12cd7250bcd8d689078e3e42ae4a7cf6a0cbaf3" - version = "v0.20171020" - -[[projects]] - digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "NUT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "NUT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:6b4ef49506ebd7cc4fc6d55cc54716eefb63b38a0e90caa2644e534dd20907e9" - name = "github.com/olekukonko/tablewriter" - packages = ["."] - pruneopts = "NUT" - revision = "bdcc175572fd7abece6c831e643891b9331bc9e7" - -[[projects]] - digest = "1:9072181164e616e422cbfbe48ca9ac249a4d76301ca0876c9f56b937cf214a2f" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "NUT" - revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4" - -[[projects]] - digest = 
"1:a220f6dbf33a64b62870ff624b41d45731926411bda3cc26f62926c7e02c5448" - name = "github.com/pelletier/go-buffruneio" - packages = ["."] - pruneopts = "NUT" - revision = "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d" - version = "v0.1.0" - -[[projects]] - digest = "1:e89c934474e1d6330f141c99a3a2b14d9ee2c31081bbd9c9a88fc4e3503306ae" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "NUT" - revision = "0049ab3dc4c4c70a9eee23087437b69c0dde2130" - -[[projects]] - branch = "master" - digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - pruneopts = "NUT" - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" - -[[projects]] - digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" - name = "github.com/peterbourgon/diskv" - packages = ["."] - pruneopts = "NUT" - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - digest = "1:57c74655dc3cee7b4aad20cecdd934cd87ecb5efb0b6d397c872083d6bbb4aa4" - name = "github.com/pkg/browser" - packages = ["."] - pruneopts = "NUT" - revision = "9302be274faad99162b9d48ec97b24306872ebb0" - -[[projects]] - digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "NUT" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:06596f3d837b0daf29e787840cd53d82261f2f0d5f2cb0e7904a63d4096a7c63" - name = "github.com/pkg/profile" - packages = ["."] - pruneopts = "NUT" - revision = "3a8809bd8a80f8ecfe4ee1b34b3f37194968617c" - -[[projects]] - digest = "1:1e0d7268f17a54350ec80d9c5b85167cd244c4b25a77ddecd930b73c649474c8" - name = "github.com/pkg/sftp" - packages = ["."] - pruneopts = "NUT" - revision = "4d0e916071f68db74f8a73926335f809396d6b42" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "NUT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:1b6f62a965e4b2e004184bf2d38ef2915af240befa4d44e5f0e83925bcf89727" - name = "github.com/r2d4/external-storage" - packages = [ - "lib/controller", - "lib/leaderelection", - "lib/leaderelection/resourcelock", - ] - pruneopts = "NUT" - revision = "8c0e8605dc7b85893e144efd8e76d4a473f4bc7d" - -[[projects]] - digest = "1:118f00f400c10c1dd21a267fd04697c758dbe9a38cf5fa3fab3bb3625af9efe7" - name = "github.com/russross/blackfriday" - packages = ["."] - pruneopts = "NUT" - revision = "300106c228d52c8941d4b3de6054a6062a86dda3" - -[[projects]] - digest = "1:a35624c560f8e8f7959808245c79f0f31ad0fbbe245d7e58a07e1395a8c3138b" - name = "github.com/samalba/dockerclient" - packages = ["."] - pruneopts = "NUT" - revision = "91d7393ff85980ba3a8966405871a3d446ca28f2" - -[[projects]] - digest = "1:400359f0b394fb168f4aee9621d42cc005810c6e462009d5fc76055d5e96dcf3" - name = "github.com/shurcooL/sanitized_anchor_name" - packages = ["."] - pruneopts = "NUT" - revision = "10ef21a441db47d8b13ebcc5fd2310f636973c77" - -[[projects]] - digest = "1:414933e1ae2e9e19123e52fb29c97cb9d127d8a500f45a2ba80b537283a38b87" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "NUT" - revision = "89742aefa4b206dcf400792f3bd35b542998eb3b" - -[[projects]] - digest = "1:1af4e3c17f2df21ddefb311bf42c3a05178ba08097b18816c52f3b18a98b3ef3" - name = "github.com/spf13/afero" - 
packages = [ - ".", - "mem", - "sftp", - ] - pruneopts = "NUT" - revision = "b28a7effac979219c2a2ed6205a4d70e4b1bcd02" - -[[projects]] - digest = "1:362766ce6d0361b1a364868f8441e5696cd6260b4aa4c23b609adafbaeee2024" - name = "github.com/spf13/cast" - packages = ["."] - pruneopts = "NUT" - revision = "e31f36ffc91a2ba9ddb72a4b6a607ff9b3d3cb63" - -[[projects]] - digest = "1:e1a684bfcf8e8e83166e5fb959e9be6dc43b9901c709a155bfef9da3f92dec48" - name = "github.com/spf13/cobra" - packages = [ - ".", - "doc", - ] - pruneopts = "NUT" - revision = "6644d46b81fa1831979c4cded0106e774e0ef0ab" - -[[projects]] - digest = "1:5664cfa541da6bd03dc6eb79c0639d3b43ba3ae997e694cd23cfd5da59160268" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - pruneopts = "NUT" - revision = "33c24e77fb80341fe7130ee7c594256ff08ccc46" - -[[projects]] - digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "NUT" - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - digest = "1:242e0ec8a8fd3af50454fb1f90eafd5af073ea7f458e9bbbada0bc08bfbcef57" - name = "github.com/spf13/viper" - packages = ["."] - pruneopts = "NUT" - revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" - version = "v1.0.0" - -[[projects]] - digest = "1:0dfe985a13070b2f191d1b6aaba2c737a781b0d0488c2337cd69cce107543ce8" - name = "github.com/xeipuuv/gojsonpointer" - packages = ["."] - pruneopts = "NUT" - revision = "e0fe6f68307607d540ed8eac07a342c33fa1b54a" - -[[projects]] - digest = "1:dffaacd300adca64381977995d07e91b53a55330ecd6835d9fa9947b7a4c0460" - name = "github.com/xeipuuv/gojsonreference" - packages = ["."] - pruneopts = "NUT" - revision = "e02fc20de94c78484cd5ffb007f8af96be030a45" - -[[projects]] - digest = "1:a35b3188691adc24932292a5e9606f8ddae9f732a5603bbb0456a48caf8a3edd" - name = "github.com/xeipuuv/gojsonschema" - packages = ["."] - pruneopts = "NUT" - revision = "c539bca196be50ccdd1f0bcd9076de95f9081831" - -[[projects]] - branch = "master" - digest = "1:48487a2da8f0d95a64d860167edbc4b67258813fd3c83c5269c62fe80161d700" - name = "github.com/zchee/go-vmnet" - packages = ["."] - pruneopts = "NUT" - revision = "97ebf91740978f1e665defc0a960fb997ebe282b" - -[[projects]] - digest = "1:14263791cdd53e1379d8363b21fe97145665737a81b73076901d204a26ab6b99" - name = "golang.org/x/crypto" - packages = [ - "curve25519", - "ed25519", - "ed25519/internal/edwards25519", - "ssh", - "ssh/terminal", - ] - pruneopts = "NUT" - revision = "81e90905daefcd6fd217b62423c0908922eadb30" - -[[projects]] - digest = "1:546efc585d0b3b4ef026faa94ae2d3211c1744d517841a27c83905e18fd0828d" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http2", - "http2/hpack", - "idna", - "lex/httplex", - ] - pruneopts = "NUT" - revision = "1c05540f6879653db88113bc4a2b70aec4bd491f" - -[[projects]] - branch = "master" - digest = "1:9822dde4525c2bc0130c4b8d209cb08b3ab68d4865972b20fe213fc2f732d9db" - name = "golang.org/x/oauth2" - packages = [ - ".", - "internal", - ] - pruneopts = "NUT" - revision = "5dab4167f31cbd76b407f1486c86b40748bc5073" - -[[projects]] - digest = "1:39485a034b89a9f2892b9c0cc24e2330e1c9981794c35281c2febfcc55b90a34" - name = "golang.org/x/sync" - packages = [ - "errgroup", - "syncmap", - ] - pruneopts = "NUT" - revision = "f52d1811a62927559de87708c8913c1650ce4f26" - -[[projects]] - digest = "1:fbad6d1464c4ff2c233f9f04181ff573960d469636b208b87458c8b5606b9b78" - name = "golang.org/x/sys" - packages = [ - "unix", - 
"windows", - "windows/registry", - ] - pruneopts = "NUT" - revision = "95c6576299259db960f6c5b9b69ea52422860fce" - -[[projects]] - digest = "1:2780cbd603ca1ee6f33330fb3a590ebaea0e95b99c7759f4d17e2337cca9352a" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "feature/plural", - "internal", - "internal/catmsg", - "internal/colltab", - "internal/format", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/number", - "internal/stringset", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "message", - "message/catalog", - "number", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "NUT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - -[[projects]] - digest = "1:d37b0ef2944431fe9e8ef35c6fffc8990d9e2ca300588df94a6890f3649ae365" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "NUT" - revision = "f51c12702a4d776e4c1fa9b0fabab841babae631" - -[[projects]] - digest = "1:34c10243da5972105edd1b4b883e2bd918fbb3f73fbe14d6af6929e547173494" - name = "google.golang.org/appengine" - packages = [ - "internal", - "internal/base", - "internal/datastore", - "internal/log", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "NUT" - revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" - version = "v1.4.0" - -[[projects]] - digest = "1:f8dc0e20a5e1cbc728a21d00e0aef61b6101dda57e45ea1ed1ab148355864e5d" - name = "gopkg.in/cheggaaa/pb.v1" - packages = ["."] - pruneopts = "NUT" - revision = "dd61faab99a777c652bb680e37715fe0cb549856" - version = "v1.0.6" - -[[projects]] - digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "NUT" - revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - version = "v0.9.0" - -[[projects]] - digest = "1:b1f13c74112ab237c8b840517fbbcfb98d3690073caad6c5b1a56c29fea086e3" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "NUT" - revision = "670d4cfef0544295bc27a114dbac37980d83185a" - -[[projects]] - digest = "1:ef716a2116d8a040e16fbcd7fca71d3354915a94720de6af22c7a09970234296" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1alpha1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "NUT" - revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa" - version = "kubernetes-1.11.2" - -[[projects]] - digest = "1:678d0c42e830ad51a66627d60b92df6eb2de54cbfd46defc088f8a2fffcddb48" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - 
"pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/uuid", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/reflect", - ] - pruneopts = "NUT" - revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae" - version = "kubernetes-1.11.2" - -[[projects]] - branch = "master" - digest = "1:803a79a066abaeb85793b0ef857b2c73b9c5ad17a2aa1d95403d31a782f34d34" - name = "k8s.io/apiserver" - packages = ["pkg/util/feature"] - pruneopts = "NUT" - revision = "67c89284117046b26ecd3776eed2a39289399f15" - -[[projects]] - digest = "1:dad96edce9b0c12aed12753aac359465a221596fbc08e9f04a9a1b70ff596d5d" - name = "k8s.io/client-go" - packages = [ - "discovery", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1alpha1", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/core/v1/fake", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/exec", - "rest", - "rest/watch", - "testing", - "tools/auth", - "tools/bootstrap/token/api", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/reference", - "transport", - "util/buffer", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/integer", - "util/retry", - ] - pruneopts = "NUT" - revision = "1f13a808da65775f22cbf47862c4e5898d8f4ca1" - version = "kubernetes-1.11.2" - -[[projects]] - digest = "1:e0d6dcb28c42a53c7243bb6380badd17f92fbd8488a075a07e984f91a07c0d23" - name = "k8s.io/kube-openapi" - packages = ["pkg/util/proto"] - pruneopts = "NUT" - revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" - -[[projects]] - digest = "1:fbc13b375bc3e2e321dc1229675466a5339aeb598690d435a8113190fd561b02" - name = "k8s.io/kubernetes" - packages = [ - "cmd/kubeadm/app/constants", - "cmd/kubeadm/app/features", - "pkg/apis/core", - "pkg/apis/core/helper", - "pkg/apis/core/v1/helper", - 
"pkg/registry/core/service/allocator", - "pkg/registry/core/service/ipallocator", - "pkg/util/goroutinemap", - "pkg/util/goroutinemap/exponentialbackoff", - "pkg/util/version", - ] - pruneopts = "NUT" - revision = "a4529464e4629c21224b3d52edfe0ea91b072862" - version = "v1.11.3" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/Parallels/docker-machine-parallels", - "github.com/blang/semver", - "github.com/cloudfoundry-attic/jibber_jabber", - "github.com/docker/go-units", - "github.com/docker/machine/drivers/hyperv", - "github.com/docker/machine/drivers/virtualbox", - "github.com/docker/machine/drivers/vmwarefusion", - "github.com/docker/machine/libmachine", - "github.com/docker/machine/libmachine/auth", - "github.com/docker/machine/libmachine/cert", - "github.com/docker/machine/libmachine/check", - "github.com/docker/machine/libmachine/drivers", - "github.com/docker/machine/libmachine/drivers/plugin", - "github.com/docker/machine/libmachine/drivers/plugin/localbinary", - "github.com/docker/machine/libmachine/engine", - "github.com/docker/machine/libmachine/host", - "github.com/docker/machine/libmachine/log", - "github.com/docker/machine/libmachine/mcnerror", - "github.com/docker/machine/libmachine/mcnflag", - "github.com/docker/machine/libmachine/mcnutils", - "github.com/docker/machine/libmachine/persist", - "github.com/docker/machine/libmachine/provision", - "github.com/docker/machine/libmachine/provision/pkgaction", - "github.com/docker/machine/libmachine/provision/serviceaction", - "github.com/docker/machine/libmachine/shell", - "github.com/docker/machine/libmachine/ssh", - "github.com/docker/machine/libmachine/state", - "github.com/docker/machine/libmachine/swarm", - "github.com/docker/machine/libmachine/version", - "github.com/golang/glog", - "github.com/google/go-cmp/cmp", - "github.com/google/go-cmp/cmp/cmpopts", - "github.com/google/go-containerregistry/pkg/authn", - "github.com/google/go-containerregistry/pkg/name", - "github.com/google/go-containerregistry/pkg/v1/remote", - "github.com/google/go-containerregistry/pkg/v1/tarball", - "github.com/google/go-github/github", - "github.com/hooklift/iso9660", - "github.com/jimmidyson/go-download", - "github.com/johanneswuerbach/nfsexports", - "github.com/libvirt/libvirt-go", - "github.com/machine-drivers/docker-machine-driver-vmware/pkg/drivers/vmware/config", - "github.com/mattn/go-isatty", - "github.com/mitchellh/go-ps", - "github.com/moby/hyperkit/go", - "github.com/olekukonko/tablewriter", - "github.com/pborman/uuid", - "github.com/pkg/browser", - "github.com/pkg/errors", - "github.com/pkg/profile", - "github.com/pmezard/go-difflib/difflib", - "github.com/r2d4/external-storage/lib/controller", - "github.com/sirupsen/logrus", - "github.com/spf13/cobra", - "github.com/spf13/cobra/doc", - "github.com/spf13/pflag", - "github.com/spf13/viper", - "github.com/xeipuuv/gojsonschema", - "github.com/zchee/go-vmnet", - "golang.org/x/crypto/ssh", - "golang.org/x/crypto/ssh/terminal", - "golang.org/x/oauth2", - "golang.org/x/sync/errgroup", - "golang.org/x/sync/syncmap", - "golang.org/x/sys/windows/registry", - "golang.org/x/text/language", - "golang.org/x/text/message", - "golang.org/x/text/number", - "k8s.io/api/apps/v1", - "k8s.io/api/core/v1", - "k8s.io/api/rbac/v1beta1", - "k8s.io/api/storage/v1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - 
"k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/intstr", - "k8s.io/apimachinery/pkg/util/net", - "k8s.io/apimachinery/pkg/util/strategicpatch", - "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "k8s.io/client-go/rest", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/clientcmd/api", - "k8s.io/client-go/tools/clientcmd/api/latest", - "k8s.io/client-go/util/homedir", - "k8s.io/kubernetes/cmd/kubeadm/app/constants", - "k8s.io/kubernetes/cmd/kubeadm/app/features", - "k8s.io/kubernetes/pkg/apis/core", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 2796a10dc4c1..000000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,124 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/blang/semver" - version = "3.5.0" - -[[constraint]] - branch = "master" - source = "github.com/machine-drivers/machine" - name = "github.com/docker/machine" - -[[constraint]] - branch = "master" - name = "github.com/jimmidyson/go-download" - -[[constraint]] - branch = "master" - name = "github.com/johanneswuerbach/nfsexports" - -[[constraint]] - name = "github.com/libvirt/libvirt-go" - version = "3.4.0" - -[[constraint]] - name = "github.com/moby/hyperkit" - version = "0.20171020.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "0.8.0" - -[[constraint]] - branch = "master" - name = "github.com/r2d4/external-storage" - -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.1" - -[[constraint]] - name = "github.com/spf13/viper" - version = "1.0.0" - -[[constraint]] - branch = "master" - name = "github.com/zchee/go-vmnet" - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.11.2" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.11.2" - -[[constraint]] - name = "k8s.io/client-go" - version = "kubernetes-1.11.2" - -[[constraint]] - name = "k8s.io/kubernetes" - version = "1.11.2" - -[[override]] - name = "k8s.io/api" - version = "kubernetes-1.11.2" - -[[override]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.11.2" - -[[override]] - name = "github.com/Azure/go-ansiterm" - revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" - -[[override]] - name = "github.com/json-iterator/go" - version = "1.1.3-22-gf2b4162" - -[[override]] - branch = "master" - name = "github.com/intel-go/cpuid" - -[prune] - go-tests = true - non-go = true - unused-packages = true - -[[constraint]] - name = "github.com/google/go-cmp" - version = "0.2.0" - -[[constraint]] - name = "golang.org/x/text" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - -[[constraint]] - name = "github.com/cloudfoundry-attic/jibber_jabber" - 
branch = "master" diff --git a/Makefile b/Makefile index f2b66c73f573..3f793592aaed 100755 --- a/Makefile +++ b/Makefile @@ -26,9 +26,9 @@ INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1) BUILDROOT_BRANCH ?= 2018.05 REGISTRY?=gcr.io/k8s-minikube -HYPERKIT_BUILD_IMAGE ?= karalabe/xgo-1.10.x -# NOTE: "latest" as of 2018-12-04. kube-cross images aren't updated as often as Kubernetes -BUILD_IMAGE ?= k8s.gcr.io/kube-cross:v1.11.1-1 +HYPERKIT_BUILD_IMAGE ?= karalabe/xgo-1.12.x +# NOTE: "latest" as of 2019-05-09. kube-cross images aren't updated as often as Kubernetes +BUILD_IMAGE ?= k8s.gcr.io/kube-cross:v1.12.5-1 ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image KVM_BUILD_IMAGE ?= $(REGISTRY)/kvm-build-image @@ -41,6 +41,7 @@ MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET} KERNEL_VERSION ?= 4.16.14 GO_VERSION ?= $(shell go version | cut -d' ' -f3 | sed -e 's/go//') +export GO111MODULE := on GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) @@ -48,9 +49,6 @@ GOPATH ?= $(shell go env GOPATH) BUILD_DIR ?= ./out $(shell mkdir -p $(BUILD_DIR)) -ORG := k8s.io -REPOPATH ?= $(ORG)/minikube - # Use system python if it exists, otherwise use Docker. PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube -w /minikube python python") BUILD_OS := $(shell uname -s) @@ -61,8 +59,6 @@ STORAGE_PROVISIONER_TAG := v1.8.1 MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) PROVISIONER_LDFLAGS := "$(MINIKUBE_LDFLAGS) -s -w" -MAKEDEPEND := GOPATH=$(GOPATH) ./makedepend.sh - MINIKUBEFILES := ./cmd/minikube/ HYPERKIT_FILES := ./cmd/drivers/hyperkit STORAGE_PROVISIONER_FILES := ./cmd/storage-provisioner @@ -82,7 +78,7 @@ SOURCE_PACKAGES = ./cmd/... ./pkg/... ./test/... # $(call DOCKER, image, command) define DOCKER - docker run --rm -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /go/src/$(REPOPATH) -v $(GOPATH):/go --entrypoint /bin/bash $(1) -c '$(2)' + docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /app -v $(PWD):/app -v $(GOPATH):/go --entrypoint /bin/bash $(1) -c '$(2)' endef ifeq ($(BUILD_IN_DOCKER),y) @@ -107,26 +103,10 @@ out/minikube$(IS_EXE): out/minikube-$(GOOS)-$(GOARCH)$(IS_EXE) out/minikube-windows-amd64.exe: out/minikube-windows-amd64 cp out/minikube-windows-amd64 out/minikube-windows-amd64.exe -out/minikube.d: pkg/minikube/assets/assets.go - $(MAKEDEPEND) out/minikube-$(GOOS)-$(GOARCH) $(ORG) $^ $(MINIKUBEFILES) > $@ - --include out/minikube.d - out/minikube-%: pkg/minikube/assets/assets.go ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) else -ifneq ($(GOPATH)/src/$(REPOPATH),$(CURDIR)) - $(warning ******************************************************************************) - $(warning WARNING: You are building minikube outside the expected GOPATH:) - $(warning ) - $(warning expected: $(GOPATH)/src/$(REPOPATH) ) - $(warning got: $(CURDIR) ) - $(warning ) - $(warning You will likely encounter unusual build failures. 
For proper setup, read: ) - $(warning https://github.com/kubernetes/minikube/blob/master/docs/contributors/build_guide.md) - $(warning ******************************************************************************) -endif GOOS="$(firstword $(subst -, ,$*))" GOARCH="$(lastword $(subst -, ,$*))" go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube endif @@ -174,15 +154,12 @@ iso_in_docker: --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \ $(ISO_BUILD_IMAGE) /bin/bash -test-iso: - go test -v $(REPOPATH)/test/integration --tags=iso --minikube-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660" +test-iso: pkg/minikube/assets/assets.go + go test -v ./test/integration --tags=iso --minikube-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660" .PHONY: test-pkg -test-pkg/%: - go test -v -test.timeout=60m $(REPOPATH)/$* --tags="$(MINIKUBE_BUILD_TAGS)" - -.PHONY: depend -depend: out/minikube.d out/test.d out/docker-machine-driver-hyperkit.d out/storage-provisioner.d out/docker-machine-driver-kvm2.d +test-pkg/%: pkg/minikube/assets/assets.go + go test -v -test.timeout=60m ./$* --tags="$(MINIKUBE_BUILD_TAGS)" .PHONY: all all: cross drivers e2e-cross @@ -192,7 +169,7 @@ drivers: out/docker-machine-driver-hyperkit out/docker-machine-driver-kvm2 .PHONY: integration integration: out/minikube - go test -v -test.timeout=60m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" $(TEST_ARGS) + go test -v -test.timeout=60m ./test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" $(TEST_ARGS) .PHONY: integration-none-driver integration-none-driver: e2e-linux-$(GOARCH) out/minikube-linux-$(GOARCH) @@ -200,19 +177,15 @@ integration-none-driver: e2e-linux-$(GOARCH) out/minikube-linux-$(GOARCH) .PHONY: integration-versioned integration-versioned: out/minikube - go test -v -test.timeout=60m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS) versioned" $(TEST_ARGS) + go test -v -test.timeout=60m ./test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS) versioned" $(TEST_ARGS) .PHONY: test -out/test.d: pkg/minikube/assets/assets.go - $(MAKEDEPEND) -t test $(ORG) $^ $(MINIKUBE_TEST_FILES) > $@ - --include out/test.d -test: - GOPATH=$(GOPATH) ./test.sh +test: pkg/minikube/assets/assets.go + ./test.sh # Regenerates assets.go when template files have been updated pkg/minikube/assets/assets.go: $(shell find deploy/addons -type f) - which go-bindata || GOBIN=$(GOPATH)/bin go get github.com/jteeuwen/go-bindata/... + which go-bindata || GO111MODULE=off GOBIN=$(GOPATH)/bin go get github.com/jteeuwen/go-bindata/... PATH="$(PATH):$(GOPATH)/bin" go-bindata -nomemcopy -o pkg/minikube/assets/assets.go -pkg assets deploy/addons/... 
.PHONY: cross @@ -261,7 +234,7 @@ mdlint: @$(MARKDOWNLINT) $(MINIKUBE_MARKDOWN_FILES) out/docs/minikube.md: $(shell find cmd) $(shell find pkg/minikube/constants) pkg/minikube/assets/assets.go - cd $(GOPATH)/src/$(REPOPATH) && go run -ldflags="$(MINIKUBE_LDFLAGS)" hack/gen_help_text.go + go run -ldflags="$(MINIKUBE_LDFLAGS)" hack/help_text/gen_help_text.go out/minikube_$(DEB_VERSION).deb: out/minikube-linux-amd64 cp -r installers/linux/deb/minikube_deb_template out/minikube_$(DEB_VERSION) @@ -305,10 +278,6 @@ out/minikube-installer.exe: out/minikube-windows-amd64.exe mv out/windows_tmp/minikube-installer.exe out/minikube-installer.exe rm -rf out/windows_tmp -out/docker-machine-driver-hyperkit.d: - $(MAKEDEPEND) out/docker-machine-driver-hyperkit $(ORG) $^ $(HYPERKIT_FILES) > $@ - --include out/docker-machine-driver-hyperkit.d out/docker-machine-driver-hyperkit: ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(HYPERKIT_BUILD_IMAGE),CC=o64-clang CXX=o64-clang++ /usr/bin/make $@) @@ -332,10 +301,6 @@ $(ISO_BUILD_IMAGE): deploy/iso/minikube-iso/Dockerfile @echo "" @echo "$(@) successfully built" -out/storage-provisioner.d: - $(MAKEDEPEND) out/storage-provisioner $(ORG) $^ $(STORAGE_PROVISIONER_FILES) > $@ - --include out/storage-provisioner.d out/storage-provisioner: GOOS=linux go build -o $(BUILD_DIR)/storage-provisioner -ldflags=$(PROVISIONER_LDFLAGS) cmd/storage-provisioner/main.go @@ -377,10 +342,6 @@ release-minikube: out/minikube checksum gsutil cp out/minikube-$(GOOS)-$(GOARCH) $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH) gsutil cp out/minikube-$(GOOS)-$(GOARCH).sha256 $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH).sha256 -out/docker-machine-driver-kvm2.d: - $(MAKEDEPEND) out/docker-machine-driver-kvm2 $(ORG) $^ $(KVM_DRIVER_FILES) > $@ - --include out/docker-machine-driver-kvm2.d out/docker-machine-driver-kvm2: go build \ -installsuffix "static" \ @@ -399,8 +360,7 @@ $(KVM_BUILD_IMAGE): installers/linux/kvm/Dockerfile kvm_in_docker: docker inspect $(KVM_BUILD_IMAGE) || $(MAKE) $(KVM_BUILD_IMAGE) rm -f out/docker-machine-driver-kvm2 - docker run --rm -v $(PWD):/go/src/k8s.io/minikube $(KVM_BUILD_IMAGE) \ - /usr/bin/make -C /go/src/k8s.io/minikube out/docker-machine-driver-kvm2 + $(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make out/docker-machine-driver-kvm2) .PHONY: install-kvm install-kvm: out/docker-machine-driver-kvm2 diff --git a/docs/contributors/build_guide.md b/docs/contributors/build_guide.md index 8283d413b07f..f087fe5edb73 100644 --- a/docs/contributors/build_guide.md +++ b/docs/contributors/build_guide.md @@ -17,10 +17,10 @@ $ sudo dnf install -y glibc-static ### Building from Source -Clone minikube into your go path under `$GOPATH/src/k8s.io` +Clone and build minikube: ```shell -$ git clone https://github.com/kubernetes/minikube.git $GOPATH/src/k8s.io/minikube -$ cd $GOPATH/src/k8s.io/minikube +$ git clone https://github.com/kubernetes/minikube.git +$ cd minikube $ make ``` @@ -37,14 +37,13 @@ $ git clone https://github.com/kubernetes/minikube.git Build (cross compile for linux / OS X and Windows) using make: ```shell $ cd minikube -$ docker run --rm -v "$PWD":/go/src/k8s.io/minikube -w /go/src/k8s.io/minikube golang:stretch make cross +$ MINIKUBE_BUILD_IN_DOCKER=y make cross ``` Check "out" directory: ```shell $ ls out/ -docker-machine-driver-hyperkit.d minikube minikube.d test.d -docker-machine-driver-kvm2.d minikube-linux-amd64 storage-provisioner.d +minikube-darwin-amd64 minikube-linux-amd64 
minikube-windows-amd64.exe ``` ### Run Instructions diff --git a/docs/contributors/minikube_iso.md b/docs/contributors/minikube_iso.md index b55d5ecbd2ea..d8fc153e76d4 100644 --- a/docs/contributors/minikube_iso.md +++ b/docs/contributors/minikube_iso.md @@ -25,7 +25,7 @@ Also be sure to have an UTF-8 locale set up in order to build the ISO. ### Build instructions ```shell -$ git clone https://github.com/kubernetes/minikube +$ git clone https://github.com/kubernetes/minikube.git $ cd minikube $ make buildroot-image $ make out/minikube.iso @@ -38,8 +38,7 @@ The bootable ISO image will be available in `out/minikube.iso`. ### Testing local minikube-iso changes ```shell -$ ./out/minikube start \ - --iso-url=file://$GOPATH/src/k8s.io/minikube/out/minikube.iso +$ ./out/minikube start --iso-url=file://$(pwd)/out/minikube.iso ``` ### Buildroot configuration diff --git a/docs/drivers.md b/docs/drivers.md index 1ac2ba1c74aa..8bae3b7085a8 100644 --- a/docs/drivers.md +++ b/docs/drivers.md @@ -63,16 +63,14 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine- && sudo install docker-machine-driver-kvm2 /usr/local/bin/ ``` -NOTE: Ubuntu users on a release older than 18.04, or anyone experiencing [#3206: Error creating new host: dial tcp: missing address.](https://github.com/kubernetes/minikube/issues/3206) you will need to build your own driver until [#3689](https://github.com/kubernetes/minikube/issues/3689) is resolved. Building this binary will require [Go v1.11](https://golang.org/dl/) or newer to be installed. +NOTE: Ubuntu users on a release older than 18.04, or anyone experiencing [#3206: Error creating new host: dial tcp: missing address.](https://github.com/kubernetes/minikube/issues/3206), will need to build their own driver until [#3689](https://github.com/kubernetes/minikube/issues/3689) is resolved. Building this binary will require [Go v1.12](https://golang.org/dl/) or newer to be installed.
```shell -sudo apt install libvirt-dev -test -d $GOPATH/src/k8s.io/minikube || \ - git clone https://github.com/kubernetes/minikube.git $GOPATH/src/k8s.io/minikube -cd $GOPATH/src/k8s.io/minikube -git pull -make out/docker-machine-driver-kvm2 -sudo install out/docker-machine-driver-kvm2 /usr/local/bin +$ sudo apt install libvirt-dev +$ git clone https://github.com/kubernetes/minikube.git +$ cd minikube +$ make out/docker-machine-driver-kvm2 +$ sudo install out/docker-machine-driver-kvm2 /usr/local/bin ``` To use the kvm2 driver: diff --git a/go.mod b/go.mod new file mode 100644 index 000000000000..7b7724ea34cb --- /dev/null +++ b/go.mod @@ -0,0 +1,96 @@ +module k8s.io/minikube + +go 1.12 + +require ( + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 + github.com/Parallels/docker-machine-parallels v1.3.0 + github.com/blang/semver v3.5.0+incompatible + github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 + github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 + github.com/cpuguy83/go-md2man v1.0.4 + github.com/davecgh/go-spew v0.0.0-20170626231645-782f4967f2dc + github.com/docker/docker v0.0.0-20180917213351-bbe08dc7f0b9 + github.com/docker/go-units v0.0.0-20170127094116-9e638d38cf69 + github.com/docker/machine v0.16.1 + github.com/fsnotify/fsnotify v0.0.0-20160816051541-f12c6236fe7b + github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 + github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e + github.com/golang/glog v0.0.0-20141105023935-44145f04b68c + github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 + github.com/golang/protobuf v1.2.0 + github.com/google/btree v1.0.0 + github.com/google/go-cmp v0.2.0 + github.com/google/go-containerregistry v0.0.0-20190318164241-019cdfc6adf9 + github.com/google/go-github/v25 v25.0.2 + github.com/google/go-querystring v1.0.0 + github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 + github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 + github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce + github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604 + github.com/hashicorp/go-version v1.1.0 + github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880 + github.com/hashicorp/hcl v0.0.0-20160711231752-d8c773c4cba1 + github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 + github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1 + github.com/inconshreveable/mousetrap v1.0.0 + github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 + github.com/jimmidyson/go-download v0.0.0-20161028105827-7f9a90c8c95b + github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 + github.com/json-iterator/go v1.1.5 + github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 + github.com/libvirt/libvirt-go v3.4.0+incompatible + github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 + github.com/magiconair/properties v0.0.0-20160816085511-61b492c03cf4 + github.com/mattn/go-isatty v0.0.4 + github.com/mattn/go-runewidth v0.0.0-20161012013512-737072b4e32b + github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 + github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49 + github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/olekukonko/tablewriter v0.0.0-20160923125401-bdcc175572fd + 
github.com/pborman/uuid v0.0.0-20150603214016-ca53cad383ca + github.com/pelletier/go-buffruneio v0.1.0 + github.com/pelletier/go-toml v0.0.0-20160822122712-0049ab3dc4c4 + github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c + github.com/peterbourgon/diskv v2.0.1+incompatible + github.com/pkg/browser v0.0.0-20160118053552-9302be274faa + github.com/pkg/errors v0.8.0 + github.com/pkg/profile v0.0.0-20161223203901-3a8809bd8a80 + github.com/pkg/sftp v0.0.0-20160930220758-4d0e916071f6 + github.com/pmezard/go-difflib v1.0.0 + github.com/r2d4/external-storage v0.0.0-20171222174501-8c0e8605dc7b + github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5 + github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 + github.com/shurcooL/sanitized_anchor_name v0.0.0-20151028001915-10ef21a441db + github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2 + github.com/spf13/afero v0.0.0-20160816080757-b28a7effac97 + github.com/spf13/cast v0.0.0-20160730092037-e31f36ffc91a + github.com/spf13/cobra v0.0.0-20180228053838-6644d46b81fa + github.com/spf13/jwalterweatherman v0.0.0-20160311093646-33c24e77fb80 + github.com/spf13/pflag v1.0.1 + github.com/spf13/viper v1.0.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 + github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c + github.com/xeipuuv/gojsonschema v0.0.0-20160623135812-c539bca196be + github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097 + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c + golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/text v0.3.2 + golang.org/x/time v0.0.0-20161028155119-f51c12702a4d + google.golang.org/appengine v1.4.0 + gopkg.in/cheggaaa/pb.v1 v1.0.6 + gopkg.in/inf.v0 v0.9.0 + gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054 + k8s.io/api v0.0.0-20180712090710-2d6f90ab1293 + k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d + k8s.io/apiserver v0.0.0-20180914001516-67c892841170 + k8s.io/client-go v0.0.0-20180806134042-1f13a808da65 + k8s.io/kube-openapi v0.0.0-20180216212618-50ae88d24ede + k8s.io/kubernetes v1.11.3 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000000..d001caa1bab8 --- /dev/null +++ b/go.sum @@ -0,0 +1,195 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Parallels/docker-machine-parallels v1.3.0 h1:RG1fyf3v1GwXMCeHRiZkB4tL9phFZEv6ixcvRZ1raN8= +github.com/Parallels/docker-machine-parallels v1.3.0/go.mod h1:HCOMm3Hulq/xuEVQMyZOuQlA+dSZpFY5kdCTZWjMVis= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 h1:+ziP/wVJWuAORkjv7386TRidVKY57X0bXBZFMeFlW+U= +github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9/go.mod h1:txokOny9wavBtq2PWuHmj1P+eFwpCsj+gQeNNANChfU= +github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:Yg2hDs4b13Evkpj42FU2idX2cVXVFqQSheXYKM86Qsk= +github.com/cloudfoundry-attic/jibber_jabber 
v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:MgJyK38wkzZbiZSKeIeFankxxSA8gayko/nr5x5bgBA= +github.com/cpuguy83/go-md2man v1.0.4 h1:OwjhDpK9YGCcI5CDf8HcdfsXqr6znFyAJfuZ27ixJsc= +github.com/cpuguy83/go-md2man v1.0.4/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= +github.com/davecgh/go-spew v0.0.0-20170626231645-782f4967f2dc h1:0A0n6a0Y3vW5ktoWKC+ggkGXRzMJWMvqIYlFmsjwQzY= +github.com/davecgh/go-spew v0.0.0-20170626231645-782f4967f2dc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/docker v0.0.0-20180917213351-bbe08dc7f0b9 h1:dArdEP6A7F7aoAph4Gs505ME7QSBjjbRdpklFV384KU= +github.com/docker/docker v0.0.0-20180917213351-bbe08dc7f0b9/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.0.0-20170127094116-9e638d38cf69 h1:N4WAsrRIb+4U1yIwJO3FMrLnrr61ael894nygpViQTU= +github.com/docker/go-units v0.0.0-20170127094116-9e638d38cf69/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/machine v0.16.1 h1:zrgroZounGVkxLmBqMyc1uT2GgapXVjIWHCfBf0udrA= +github.com/docker/machine v0.16.1/go.mod h1:I8mPNDeK1uH+JTcUU7X0ZW8KiYz0jyAgNaeSJ1rCfDI= +github.com/fsnotify/fsnotify v0.0.0-20160816051541-f12c6236fe7b h1:lHoxUxMozh/yCASOoFep9dPMva62ztmxKK2VB8//Aoo= +github.com/fsnotify/fsnotify v0.0.0-20160816051541-f12c6236fe7b/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e h1:ago6fNuQ6IhszPsXkeU7qRCyfsIX7L67WDybsAPkLl8= +github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA= +github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-containerregistry v0.0.0-20190318164241-019cdfc6adf9 h1:YVdfjqpMrLPm95Jc5JBFaEeGoPfhFxr43VW/UHQ2NVo= +github.com/google/go-containerregistry v0.0.0-20190318164241-019cdfc6adf9/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk= +github.com/google/go-github/v25 v25.0.2 h1:MqXE7nOlIF91NJ/PXAcvS2dC+XXCDbY7RvJzjyEPAoU= +github.com/google/go-github/v25 v25.0.2/go.mod h1:6z5pC69qHtrPJ0sXPsj4BLnd82b+r6sLB7qcBoRZqpw= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz 
v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604 h1:VIq8E7fMiC4h3agg0ya56L0jHn7QisZZcWZXVKJb9jQ= +github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880 h1:OaRuzt9oCKNui8cCskZijoKUwe+aCuuCwvx1ox8FNyw= +github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20160711231752-d8c773c4cba1 h1:9j16AiR0R5hDbDBMzfUfIP9CUbbw6T8nYN4iZz3/wjg= +github.com/hashicorp/hcl v0.0.0-20160711231752-d8c773c4cba1/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 h1:ARl0RuGZTqBOMXQIfXen0twVSJ8kMojd7ThJf4EBcrc= +github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8/go.mod h1:sOC47ru8lB0DlU0EZ7BJ0KCP5rDqOvx0c/5K5ADm8H0= +github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1 h1:FeeCi0I2Fu8kA8IXrdVPtGzym+mW9bzfj9f26EaES9k= +github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 h1:XboatR7lasl05yel5hNXF7kQBw2oFUGdMztcgisfhNU= +github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6/go.mod h1:RmeVYf9XrPRbRc3XIx0gLYA8qOFvNoPOfaEZduRlEp4= +github.com/jimmidyson/go-download v0.0.0-20161028105827-7f9a90c8c95b h1:3TknJxYSK1eDe21QorC3C2Yz8jylk6vlJG9YABnFzkU= +github.com/jimmidyson/go-download v0.0.0-20161028105827-7f9a90c8c95b/go.mod h1:I3WsAhNNoG7a/d8HMlYUywJJlfOs/+/83NEUjuDp4lc= +github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= +github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 h1:YUrU1/jxRqnt0PSrKj1Uj/wEjk/fjnE80QFfi2Zlj7Q= +github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169/go.mod h1:glhvuHOU9Hy7/8PwwdtnarXqLagOX0b/TbZx2zLMqEg= 
+github.com/libvirt/libvirt-go v3.4.0+incompatible h1:Cpyalgj1x8JIeTlL6SDYZBo7j8nY3+5XHqmi8DaunCk= +github.com/libvirt/libvirt-go v3.4.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 h1:+E1IKKk+6kaQrCPg6edJZ/zISZijuZTPnzy6RE4C/Ho= +github.com/machine-drivers/docker-machine-driver-vmware v0.1.1/go.mod h1:ej014C83EmSnxJeJ8PtVb8OLJ91PJKO1Q8Y7sM5CK0o= +github.com/magiconair/properties v0.0.0-20160816085511-61b492c03cf4 h1:YVH4JcnWs1z/qQ2Dg5BnGGQL8PcUOO97Sb5w7RyuBl4= +github.com/magiconair/properties v0.0.0-20160816085511-61b492c03cf4/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.0-20161012013512-737072b4e32b h1:idzeyUe3K4aU/SIZWMykIkJJyTD7CgDkxUQEjV07fno= +github.com/mattn/go-runewidth v0.0.0-20161012013512-737072b4e32b/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 h1:kw1v0NlnN+GZcU8Ma8CLF2Zzgjfx95gs3/GN3vYAPpo= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49 h1:kaWdlw4YogwkDl8CG+/VxhXkrL9uz3n1D9QBC2pEGLE= +github.com/mitchellh/mapstructure v0.0.0-20170307201123-53818660ed49/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd h1:WDG9l//UGcGx4lqqEDY23+mRnQMKFY+8LD83OxQllRk= +github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd/go.mod h1:zGAVB/FkAf4ozkR8CCuj4LcVuErrNsj9APTDFvhOckw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/olekukonko/tablewriter v0.0.0-20160923125401-bdcc175572fd h1:nEatQ6JnwCT9iYD5uqYUiFqq8tJGX25to8KVKXqya7k= +github.com/olekukonko/tablewriter v0.0.0-20160923125401-bdcc175572fd/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/pborman/uuid v0.0.0-20150603214016-ca53cad383ca h1:dKRMHfduZ/ZqOHuYGk/0kkTIUbnyorkAfzLOp6Ts8pU= +github.com/pborman/uuid v0.0.0-20150603214016-ca53cad383ca/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pelletier/go-buffruneio v0.1.0 h1:ig6N9Cg71k/P+UUbhwdOFtJWz+qa8/3by7AzMprMWBM= +github.com/pelletier/go-buffruneio v0.1.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v0.0.0-20160822122712-0049ab3dc4c4 h1:tMVXZ04h5CqgTvMyA8IL1b9xlJz7G+mTcCsYi3WXRtA= +github.com/pelletier/go-toml v0.0.0-20160822122712-0049ab3dc4c4/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/browser v0.0.0-20160118053552-9302be274faa 
h1:od00Tr1U7+cLVtc+RNFmR53spHUF98Ziu33S8UIQnt0= +github.com/pkg/browser v0.0.0-20160118053552-9302be274faa/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v0.0.0-20161223203901-3a8809bd8a80 h1:DQFOykp5w+HOykOMzd2yOX5P6ty58Ggiu2rthHgcNQg= +github.com/pkg/profile v0.0.0-20161223203901-3a8809bd8a80/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v0.0.0-20160930220758-4d0e916071f6 h1:V8AT/I4KmIDRfObq0yBUvbD4DeaYmQY9GhC5sKl24Mo= +github.com/pkg/sftp v0.0.0-20160930220758-4d0e916071f6/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/r2d4/external-storage v0.0.0-20171222174501-8c0e8605dc7b h1:+wokSDzl6kjfWhVQsBhFOC2t4TYfdLfRXfWorEg3KUE= +github.com/r2d4/external-storage v0.0.0-20171222174501-8c0e8605dc7b/go.mod h1:/UlUhYuWiiitqIPbAxyU96i/wDlBS8sRHX2lRN+ffgs= +github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5 h1:+6eORf9Bt4C3Wjt91epyu6wvLW+P6+AEODb6uKgO+4g= +github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 h1:XRl74t6xHKI5EVIjDI5nPlHRq0bHED9/TjQuD8/UMkE= +github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859/go.mod h1:yeYR4SlaRZJct6lwNRKR+qd0CocnxxWDE7Vh5dxsn/w= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20151028001915-10ef21a441db h1:lrOUn8raSZS/V52c7elGaEyuogqSkEo/Qj2Auo2G1ik= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20151028001915-10ef21a441db/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2 h1:+8J/sCAVv2Y9Ct1BKszDFJEVWv6Aynr2O4FYGUg6+Mc= +github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/spf13/afero v0.0.0-20160816080757-b28a7effac97 h1:Gv1HykSEG+RKWWWkM69nPrJKhE/EM2oFb1nBWogHNv8= +github.com/spf13/afero v0.0.0-20160816080757-b28a7effac97/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v0.0.0-20160730092037-e31f36ffc91a h1:tPI5RnYZJhcXj0LhJ9pi7PS7gqOhuFR+4HEKyDz3BnQ= +github.com/spf13/cast v0.0.0-20160730092037-e31f36ffc91a/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cobra v0.0.0-20180228053838-6644d46b81fa h1:w+PYzMV4Hrxj0nSHUxY881YFte8hst14ZZ0ZNL3mlEA= +github.com/spf13/cobra v0.0.0-20180228053838-6644d46b81fa/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v0.0.0-20160311093646-33c24e77fb80 h1:evyGXhHMrxKBDkdlSPv9HMWV2o53o+Ibhm28BGc0450= +github.com/spf13/jwalterweatherman v0.0.0-20160311093646-33c24e77fb80/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I= +github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 h1:KM4T3G70MiR+JtqplcYkNVoNz7pDwYaBxWBXQK804So= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20151027082146-e0fe6f683076/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c h1:XZWnr3bsDQWAZg4Ne+cPoXRPILrNlPNQfxBuwLl43is= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20160623135812-c539bca196be h1:sRGd3e18izj1hQgF1hSvDOA8RPPnA2t4p8YeLZ/GdBU= +github.com/xeipuuv/gojsonschema v0.0.0-20160623135812-c539bca196be/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097 h1:Ucx5I1l1+TWXvqFmBigYu4Ub4MLvUuUU/whjoUvV95I= +github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097/go.mod h1:lFZSWRIpCfE/pt91hHBBpV6+x87YlCjsp+aIR2qCPPU= +golang.org/x/crypto v0.0.0-20170825220121-81e90905daef h1:R8ubLIilYRXIXpgjOg2l/ECVs3HzVKIjJEhxSsQ91u4= +golang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c h1:pcBdqVcrlT+A3i+tWsOROFONQyey9tisIQHI4xqVGLg= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171031081856-95c657629925 h1:nCH33NboKIsT4HoXBsXTWX8ul303HxWgkc5s2Ezwacg= +golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/cheggaaa/pb.v1 v1.0.6 h1:YQye4a1JysUfXYB6VihDfxb4lxOAei0xS44yN+srOew= +gopkg.in/cheggaaa/pb.v1 v1.0.6/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054 h1:ROF+R/wHHruzF40n5DfPv2jwm7rCJwvs8fz+RTZWjLE= +gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +k8s.io/api v0.0.0-20180712090710-2d6f90ab1293 h1:hROmpFC7JMobXFXMmD7ZKZLhDKvr1IKfFJoYS/45G/8= +k8s.io/api v0.0.0-20180712090710-2d6f90ab1293/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4= +k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20180914001516-67c892841170 h1:a9iLDj8v4x5/JpYsgKWSv3k6c5Q7M8dsyrAt4GLOzKQ= +k8s.io/apiserver v0.0.0-20180914001516-67c892841170/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v0.0.0-20180806134042-1f13a808da65 h1:kQX7jEIMYrWV9XqFN4usRaBLzCu7fd/qsCXxbgf3+9g= +k8s.io/client-go v0.0.0-20180806134042-1f13a808da65/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/kube-openapi v0.0.0-20180216212618-50ae88d24ede h1:YOWlONzJUq456SnNYPcK/org5asA+LU6AzNBm+l/04o= +k8s.io/kube-openapi v0.0.0-20180216212618-50ae88d24ede/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kubernetes v1.11.3 h1:49Pz+NOerox7nZ9F0m/fWv2MIuFDU5cDAoYMozurx1k= +k8s.io/kubernetes v1.11.3/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= diff --git a/hack/release_notes/listpullreqs.go b/hack/release_notes/listpullreqs.go index 39f1b56b2a04..34232171af1e 100644 --- a/hack/release_notes/listpullreqs.go +++ b/hack/release_notes/listpullreqs.go @@ -21,7 +21,7 @@ import ( "context" "fmt" - "github.com/google/go-github/github" + "github.com/google/go-github/v25/github" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/oauth2" diff --git a/installers/linux/kvm/Dockerfile b/installers/linux/kvm/Dockerfile index c138d9187672..9721824d1657 100644 --- a/installers/linux/kvm/Dockerfile +++ b/installers/linux/kvm/Dockerfile @@ -21,6 +21,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ pkg-config \ curl \ libvirt-dev \ + git \ && rm -rf /var/lib/apt/lists/* ARG GO_VERSION diff --git a/makedepend.sh b/makedepend.sh deleted file mode 100755 index 8263c7b160de..000000000000 --- a/makedepend.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -# Copyright 2018 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Generate go dependencies, for make. Uses `go list`. -# Usage: makedepend.sh [-t] output package extra path... - -PATH_FORMAT='{{ .ImportPath }}{{"\n"}}{{join .Deps "\n"}}' -FILE_FORMAT='{{ range $file := .GoFiles }} {{$.Dir}}/{{$file}}{{"\n"}}{{end}}' - -if [ "$1" = "-t" ] -then - PATH_FORMAT='{{ if .TestGoFiles }} {{.ImportPath}} {{end}}' - shift -fi - -out=$1 -pkg=$2 -extra=$3 -paths="$4 $5 $6" - -# check for mandatory parameters -test -n "$out$pkg$paths" || exit 1 - -echo "$out: $extra\\" -go list -f "$PATH_FORMAT" $paths | - grep "$pkg" | - xargs go list -f "$FILE_FORMAT" | - sed -e "s|^ ${GOPATH}| \$(GOPATH)|;s/$/ \\\\/" -echo " #" diff --git a/test.sh b/test.sh index acd80f7a2916..0cfa6f7a69e5 100755 --- a/test.sh +++ b/test.sh @@ -16,8 +16,6 @@ set -e -REPO_PATH="k8s.io/minikube" - # Check for python on host, and use it if possible, otherwise fall back on python dockerized if [[ -f $(which python 2>&1) ]]; then PYTHON="python" @@ -31,7 +29,6 @@ COV_TMP_FILE=coverage_tmp.txt # Run "go test" on packages that have test files. Also create coverage profile echo "Running go tests..." -cd ${GOPATH}/src/${REPO_PATH} rm -f out/$COV_FILE || true echo "mode: count" > out/$COV_FILE for pkg in $(go list -f '{{ if .TestGoFiles }} {{.ImportPath}} {{end}}' ./cmd/... ./pkg/...); do diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1d85..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
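The removed makedepend.sh above generated Makefile prerequisites by templating `go list` output (its PATH_FORMAT and FILE_FORMAT templates are shown in the deleted script). As a hedged, standalone sketch of that same technique — not part of this change, with $PKG standing in as a placeholder for the module path — the equivalent pipeline is:

  # List a package tree's transitive imports, keep the in-repo ones,
  # then print the Go source files behind each matching package.
  go list -f '{{ .ImportPath }}{{"\n"}}{{join .Deps "\n"}}' ./... |
    grep "$PKG" |
    xargs go list -f '{{ range $file := .GoFiles }}{{$.Dir}}/{{$file}}{{"\n"}}{{end}}'

Under Go modules the same commands run from the repository root, without the GOPATH-relative path rewriting the old script performed.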
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33bc9e..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - 
-func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c038..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c5ec..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c3428..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 
index 1c719db9e48c..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd231ba..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c202..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - 
DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e94693a..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab6963..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada62a..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) 
- } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cde3b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt int) 
[]int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9aa6..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - 
case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd12da..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493a22..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index a6732797263f..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := strconv.ParseInt(ac.Parameters[index], 
10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b912b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. 
-// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728f49..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
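Not part of the patch; a small standalone illustration of how the attribute bits defined above combine (green + blue yields cyan, intensity brightens it) and how reverse video swaps the foreground/background nibbles, which is what the translation code below and invertAttributes perform.

// +build windows

package main

import (
	"fmt"

	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// Bright cyan text on a blue background.
	attr := winterm.FOREGROUND_GREEN | winterm.FOREGROUND_BLUE | winterm.FOREGROUND_INTENSITY |
		winterm.BACKGROUND_BLUE

	// Reverse video swaps the two color nibbles (invertAttributes below also
	// preserves the COMMON_LVB bits, omitted here for brevity).
	inverted := (attr&winterm.FOREGROUND_MASK)<<4 | (attr&winterm.BACKGROUND_MASK)>>4

	fmt.Printf("attributes %#x, reverse video %#x\n", attr, inverted) // attributes 0x1b, reverse video 0xb1
}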
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index 3ee06ea72824..000000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - h.logf("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25efb..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, 
yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 2d27fa1d0288..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d77ba..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
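A standalone sketch, not part of the vendored code, of the clamping idea shared by effectiveSr above and addInRange below: an offset is added and pinned to the window bounds, and a degenerate scroll region falls back to the whole window. The helper name clampAdd is hypothetical.

package main

import "fmt"

// clampAdd adds an increment and pins the result to [min, max], like addInRange.
func clampAdd(n, inc, min, max int16) int16 {
	v := n + inc
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	// Window rows 0..24; a DECSTBM-style region of rows 5..40 whose bottom
	// exceeds the window gets clamped to the window bottom.
	winTop, winBottom := int16(0), int16(24)
	srTop, srBottom := int16(5), int16(40)

	top := clampAdd(winTop, srTop, winTop, winBottom)
	bottom := clampAdd(winTop, srBottom, winTop, winBottom)
	if top >= bottom {
		top, bottom = winTop, winBottom // degenerate region: use the full window
	}
	fmt.Println(top, bottom) // 5 24
}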
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 2d40fb75ad0b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,743 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "log" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" -) - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD - logf func(string, ...interface{}) -} - -type Option func(*windowsAnsiEventHandler) - -func WithLogf(f func(string, ...interface{})) Option { - return func(w *windowsAnsiEventHandler) { - w.logf = f - } -} - -func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - h := &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } - for _, o := range opts { - o(h) - } - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("winEventHandler.log") - logger := log.New(logFile, "", log.LstdFlags) - if h.logf != nil { - l := h.logf - h.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - h.logf = logger.Printf - } - } - - if h.logf == nil { - h.logf = func(string, ...interface{}) {} - } - - return h -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - h.logf("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. 
- h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/Parallels/docker-machine-parallels/LICENSE b/vendor/github.com/Parallels/docker-machine-parallels/LICENSE deleted file mode 100644 index 4706ea39b2bc..000000000000 --- a/vendor/github.com/Parallels/docker-machine-parallels/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2015-2017 Parallels IP Holdings GmbH. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/Parallels/docker-machine-parallels/parallels.go b/vendor/github.com/Parallels/docker-machine-parallels/parallels.go deleted file mode 100644 index 25acb8bd70ad..000000000000 --- a/vendor/github.com/Parallels/docker-machine-parallels/parallels.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !darwin - -package parallels - -import "github.com/docker/machine/libmachine/drivers" - -func NewDriver(hostName, storePath string) drivers.Driver { - return drivers.NewDriverNotSupported("parallels", hostName, storePath) -} diff --git a/vendor/github.com/Parallels/docker-machine-parallels/parallels_darwin.go b/vendor/github.com/Parallels/docker-machine-parallels/parallels_darwin.go deleted file mode 100644 index a92bf892cc1c..000000000000 --- a/vendor/github.com/Parallels/docker-machine-parallels/parallels_darwin.go +++ /dev/null @@ -1,741 +0,0 @@ -package parallels - -import ( - "errors" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" - "github.com/hashicorp/go-version" -) - -const ( - isoFilename = "boot2docker.iso" - shareFolderName = "Users" - shareFolderPath = "/Users" - minDiskSize = 32 - defaultCPU = 1 - defaultMemory = 1024 - defaultVideoSize = 64 - defaultBoot2DockerURL = "" - defaultNoShare = false - defaultDiskSize = 20000 - defaultSSHPort = 22 - defaultSSHUser = "docker" -) - -var ( - reMachineNotFound = regexp.MustCompile(`Failed to get VM config: The virtual machine could not be found..*`) - reParallelsVersion = regexp.MustCompile(`.* (\d+\.\d+\.\d+).*`) - reParallelsEdition = regexp.MustCompile(`edition="(.+)"`) - - errMachineExist = errors.New("machine already exists") - errMachineNotExist = errors.New("machine does not exist") - errSharedNotConnected = errors.New("Your Mac host is not connected to Shared network. 
Please, enable this option: 'Parallels Desktop' -> 'Preferences' -> 'Network' -> 'Shared' -> 'Connect Mac to this network'") - - v10, _ = version.NewVersion("10.0.0") - v11, _ = version.NewVersion("11.0.0") -) - -// Driver for Parallels Desktop -type Driver struct { - *drivers.BaseDriver - CPU int - Memory int - VideoSize int - DiskSize int - Boot2DockerURL string - NoShare bool -} - -// NewDriver creates a new Parallels Desktop driver with default settings -func NewDriver(hostName, storePath string) drivers.Driver { - return &Driver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: hostName, - StorePath: storePath, - SSHUser: defaultSSHUser, - SSHPort: defaultSSHPort, - }, - CPU: defaultCPU, - Memory: defaultMemory, - VideoSize: defaultVideoSize, - DiskSize: defaultDiskSize, - Boot2DockerURL: defaultBoot2DockerURL, - NoShare: defaultNoShare, - } -} - -// Create a host using the driver's config -func (d *Driver) Create() error { - var ( - err error - ) - - b2dutils := mcnutils.NewB2dUtils(d.StorePath) - if err = b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil { - return err - } - - log.Infof("Creating SSH key...") - sshKeyPath := d.GetSSHKeyPath() - log.Debugf("SSH key: %s", sshKeyPath) - if err = ssh.GenerateSSHKey(sshKeyPath); err != nil { - return err - } - - log.Infof("Creating Parallels Desktop VM...") - - ver, err := getParallelsVersion() - if err != nil { - return err - } - - distribution := "boot2docker" - if ver.LessThan(v11) { - distribution = "linux-2.6" - } - - absStorePath, _ := filepath.Abs(d.ResolveStorePath(".")) - if err = prlctl("create", d.MachineName, - "--distribution", distribution, - "--dst", absStorePath, - "--no-hdd"); err != nil { - return err - } - - cpus := d.CPU - if cpus < 1 { - cpus = int(runtime.NumCPU()) - } - if cpus > 32 { - cpus = 32 - } - - videoSize := d.VideoSize - if videoSize < 2 { - videoSize = defaultVideoSize - } - - if err = prlctl("set", d.MachineName, - "--select-boot-device", "off", - "--cpus", fmt.Sprintf("%d", cpus), - "--memsize", fmt.Sprintf("%d", d.Memory), - "--videosize", fmt.Sprintf("%d", videoSize), - "--cpu-hotplug", "off", - "--on-window-close", "keep-running", - "--longer-battery-life", "on", - "--3d-accelerate", "off", - "--device-bootorder", "cdrom0"); err != nil { - return err - } - - absISOPath, _ := filepath.Abs(d.ResolveStorePath(isoFilename)) - if err = prlctl("set", d.MachineName, - "--device-set", "cdrom0", - "--iface", "sata", - "--position", "0", - "--image", absISOPath, - "--connect"); err != nil { - return err - } - - initialDiskSize := minDiskSize - - // Fix for [GH-67]. Create a bigger disk on Parallels Desktop 13.0.* - constraints, _ := version.NewConstraint(">= 13.0.0, < 13.1.0") - if constraints.Check(ver) { - initialDiskSize = 1891 - } - - // Create a small plain disk. 
It will be converted and expanded later - if err = prlctl("set", d.MachineName, - "--device-add", "hdd", - "--iface", "sata", - "--position", "1", - "--image", d.diskPath(), - "--type", "plain", - "--size", fmt.Sprintf("%d", initialDiskSize)); err != nil { - return err - } - - if err = d.generateDiskImage(d.DiskSize); err != nil { - return err - } - - // For Parallels Desktop >= 11.0.0 - if ver.Compare(v11) >= 0 { - // Enable headless mode - if err = prlctl("set", d.MachineName, - "--startup-view", "headless"); err != nil { - return err - } - - // Don't share any additional folders - if err = prlctl("set", d.MachineName, - "--shf-host-defined", "off"); err != nil { - return err - } - - // Enable time sync, don't touch timezone (this part is buggy) - if err = prlctl("set", d.MachineName, "--time-sync", "on"); err != nil { - return err - } - if err = prlctl("set", d.MachineName, - "--disable-timezone-sync", "on"); err != nil { - return err - } - } else { - // Disable time sync feature because it has an issue with timezones. - if err = prlctl("set", d.MachineName, "--time-sync", "off"); err != nil { - return err - } - } - - // Configure Shared Folders - if err = prlctl("set", d.MachineName, - "--shf-host", "on", - "--shared-cloud", "off", - "--shared-profile", "off", - "--smart-mount", "off"); err != nil { - return err - } - - if !d.NoShare { - if err = prlctl("set", d.MachineName, - "--shf-host-add", shareFolderName, - "--path", shareFolderPath); err != nil { - return err - } - } - - log.Infof("Starting Parallels Desktop VM...") - - // Don't use Start() since it expects to have a dhcp lease already - if err = prlctl("start", d.MachineName); err != nil { - return err - } - - var ip string - - log.Infof("Waiting for VM to come online...") - for i := 1; i <= 60; i++ { - ip, err = d.getIPfromDHCPLease() - if err != nil { - log.Debugf("Not there yet %d/%d, error: %s", i, 60, err) - time.Sleep(2 * time.Second) - continue - } - - if ip != "" { - log.Debugf("Got an ip: %s", ip) - conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, d.SSHPort), time.Duration(2*time.Second)) - if err != nil { - log.Debugf("SSH Daemon not responding yet: %s", err) - time.Sleep(2 * time.Second) - continue - } - conn.Close() - break - } - } - - if ip == "" { - return fmt.Errorf("Machine didn't return an IP after 120 seconds, aborting") - } - - d.IPAddress = ip - - if err := d.Start(); err != nil { - return err - } - - return nil -} - -// DriverName returns the name of the driver as it is registered -func (d *Driver) DriverName() string { - return "parallels" -} - -// GetIP returns an IP or hostname that this host is available at -// e.g. 1.2.3.4 or docker-host-d60b70a14d3a.cloudapp.net -func (d *Driver) GetIP() (string, error) { - // Assume that Parallels Desktop hosts don't have IPs unless they are running - s, err := d.GetState() - if err != nil { - return "", err - } - if s != state.Running { - return "", drivers.ErrHostIsNotRunning - } - - ip, err := d.getIPfromDHCPLease() - if err != nil { - return "", err - } - - return ip, nil -} - -// GetSSHHostname returns hostname for use with ssh -func (d *Driver) GetSSHHostname() (string, error) { - return d.GetIP() -} - -// GetURL returns a Docker compatible host URL for connecting to this host -// e.g. 
tcp://1.2.3.4:2376 -func (d *Driver) GetURL() (string, error) { - ip, err := d.GetIP() - if err != nil { - return "", err - } - if ip == "" { - return "", nil - } - return fmt.Sprintf("tcp://%s:2376", ip), nil -} - -// GetState returns the state that the host is in (running, stopped, etc) -func (d *Driver) GetState() (state.State, error) { - stdout, stderr, err := prlctlOutErr("list", d.MachineName, "--output", "status", "--no-header") - if err != nil { - if reMachineNotFound.FindString(stderr) != "" { - return state.Error, errMachineNotExist - } - return state.Error, err - } - - switch stdout { - case "running\n": - return state.Running, nil - case "paused\n": - return state.Paused, nil - case "suspended\n": - return state.Saved, nil - case "stopping\n": - return state.Stopping, nil - case "stopped\n": - return state.Stopped, nil - } - return state.None, nil -} - -// Kill stops a host forcefully -func (d *Driver) Kill() error { - return prlctl("stop", d.MachineName, "--kill") -} - -// PreCreateCheck allows for pre-create operations to make sure a driver is ready for creation -func (d *Driver) PreCreateCheck() error { - // Check platform type - if runtime.GOOS != "darwin" { - return fmt.Errorf("Driver \"parallels\" works only on OS X!") - } - - // Check Parallels Desktop version - ver, err := getParallelsVersion() - if err != nil { - return err - } - - if ver.LessThan(v10) { - return fmt.Errorf("Driver \"parallels\" supports only Parallels Desktop 10 and higher. You use: Parallels Desktop %s.", ver) - } - - if ver.LessThan(v11) { - log.Debugf("Found Parallels Desktop version: %s", ver) - log.Infof("Driver \"parallels\" integration with Parallels Desktop 10 is maintained by open source community.") - log.Infof("For Parallels supported configuration you should use it with Parallels Desktop 11 or later (Pro or Business edition).") - return nil - } - - // Check Parallels Desktop edition - edit, err := getParallelsEdition() - if err != nil { - return err - } - - log.Debugf("Found Parallels Desktop version: %d, edition: %s", ver, edit) - - switch edit { - case "pro", "business": - break - default: - return fmt.Errorf("Docker Machine can be used only with Parallels Desktop Pro or Business edition. You use: %s edition", edit) - } - - // Check whether the host is connected to Shared network - ok, err := isSharedConnected() - if err != nil { - return err - } - if !ok { - return errSharedNotConnected - } - - // Downloading boot2docker to cache should be done here to make sure - // that a download failure will not leave a machine half created. - b2dutils := mcnutils.NewB2dUtils(d.StorePath) - if err := b2dutils.UpdateISOCache(d.Boot2DockerURL); err != nil { - return err - } - - return nil -} - -// Remove a host -func (d *Driver) Remove() error { - s, err := d.GetState() - if err != nil { - if err == errMachineNotExist { - log.Infof("machine does not exist, assuming it has been removed already") - return nil - } - return err - } - if s == state.Running { - if err := d.Kill(); err != nil { - return err - } - } - return prlctl("delete", d.MachineName) -} - -// Restart a host. This may just call Stop(); Start() if the provider does not -// have any special restart behaviour. 
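Illustrative only, not part of the vendored driver: the hashicorp/go-version calls used in Create and PreCreateCheck above, showing how behaviour is gated on the detected Parallels Desktop version (the version string here is hypothetical).

package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	ver, _ := version.NewVersion("13.0.2") // hypothetical detected version
	v11, _ := version.NewVersion("11.0.0")

	// Parallels Desktop >= 11 gets headless mode and the time-sync tweaks.
	fmt.Println(ver.Compare(v11) >= 0) // true

	// The GH-67 larger-initial-disk workaround above only applies to 13.0.x.
	constraints, _ := version.NewConstraint(">= 13.0.0, < 13.1.0")
	fmt.Println(constraints.Check(ver)) // true
}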
-func (d *Driver) Restart() error { - if err := d.Stop(); err != nil { - return err - } - return d.Start() -} - -// GetCreateFlags registers the flags this driver adds to -// "docker hosts create" -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return []mcnflag.Flag{ - mcnflag.IntFlag{ - EnvVar: "PARALLELS_MEMORY_SIZE", - Name: "parallels-memory", - Usage: "Size of memory for host in MB", - Value: defaultMemory, - }, - mcnflag.IntFlag{ - EnvVar: "PARALLELS_CPU_COUNT", - Name: "parallels-cpu-count", - Usage: "number of CPUs for the machine (-1 to use the number of CPUs available)", - Value: defaultCPU, - }, - mcnflag.IntFlag{ - EnvVar: "PARALLELS_VIDEO_SIZE", - Name: "parallels-video-size", - Usage: "Size of video for host in MB", - Value: defaultVideoSize, - }, - mcnflag.IntFlag{ - EnvVar: "PARALLELS_DISK_SIZE", - Name: "parallels-disk-size", - Usage: "Size of disk for host in MB", - Value: defaultDiskSize, - }, - mcnflag.StringFlag{ - EnvVar: "PARALLELS_BOOT2DOCKER_URL", - Name: "parallels-boot2docker-url", - Usage: "The URL of the boot2docker image. Defaults to the latest available version", - Value: defaultBoot2DockerURL, - }, - mcnflag.BoolFlag{ - Name: "parallels-no-share", - Usage: "Disable the mount of your home directory", - }, - } -} - -// SetConfigFromFlags configures the driver with the object that was returned -// by RegisterCreateFlags -func (d *Driver) SetConfigFromFlags(opts drivers.DriverOptions) error { - d.CPU = opts.Int("parallels-cpu-count") - d.Memory = opts.Int("parallels-memory") - d.VideoSize = opts.Int("parallels-video-size") - d.DiskSize = opts.Int("parallels-disk-size") - d.Boot2DockerURL = opts.String("parallels-boot2docker-url") - d.SetSwarmConfigFromFlags(opts) - d.SSHUser = defaultSSHUser - d.SSHPort = defaultSSHPort - d.NoShare = opts.Bool("parallels-no-share") - - return nil -} - -// Start a host -func (d *Driver) Start() error { - // Check whether the host is connected to Shared network - ok, err := isSharedConnected() - if err != nil { - return err - } - if !ok { - return errSharedNotConnected - } - - s, err := d.GetState() - if err != nil { - return err - } - - switch s { - case state.Stopped, state.Saved, state.Paused: - if err = prlctl("start", d.MachineName); err != nil { - return err - } - log.Infof("Waiting for VM to start...") - case state.Running: - break - default: - log.Infof("VM not in restartable state") - } - - if err = drivers.WaitForSSH(d); err != nil { - return err - } - - d.IPAddress, err = d.GetIP() - if err != nil { - return err - } - - // Mount Share Folder - if !d.NoShare { - if err := d.mountShareFolder(shareFolderName, shareFolderPath); err != nil { - return err - } - } - - return nil -} - -// Stop a host gracefully -func (d *Driver) Stop() error { - if err := prlctl("stop", d.MachineName); err != nil { - return err - } - for { - s, err := d.GetState() - if err != nil { - return err - } - if s == state.Running { - time.Sleep(1 * time.Second) - } else { - break - } - } - return nil -} - -func (d *Driver) getIPfromDHCPLease() (string, error) { - - DHCPLeaseFile := "/Library/Preferences/Parallels/parallels_dhcp_leases" - - stdout, _, err := prlctlOutErr("list", "-i", d.MachineName) - macRe := regexp.MustCompile("net0.* mac=([0-9A-F]{12}) card=.*") - macMatch := macRe.FindAllStringSubmatch(stdout, 1) - - if len(macMatch) != 1 { - return "", fmt.Errorf("MAC address for NIC: nic0 on Virtual Machine: %s not found!\n", d.MachineName) - } - mac := macMatch[0][1] - - if len(mac) != 12 { - return "", fmt.Errorf("Not a valid MAC address: 
%s. It should be exactly 12 digits.", mac) - } - - leases, err := ioutil.ReadFile(DHCPLeaseFile) - if err != nil { - return "", err - } - - ipRe := regexp.MustCompile("(.*)=\"(.*),(.*)," + strings.ToLower(mac) + ",.*\"") - mostRecentIP := "" - mostRecentLease := uint64(0) - for _, l := range ipRe.FindAllStringSubmatch(string(leases), -1) { - ip := l[1] - expiry, _ := strconv.ParseUint(l[2], 10, 64) - leaseTime, _ := strconv.ParseUint(l[3], 10, 32) - log.Debugf("Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\n", ip, mac, expiry, leaseTime) - if mostRecentLease <= expiry-leaseTime { - mostRecentIP = ip - mostRecentLease = expiry - leaseTime - } - } - - if len(mostRecentIP) == 0 { - return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, DHCPLeaseFile) - } - log.Debugf("Found IP lease: %s for MAC address %s\n", mostRecentIP, mac) - - return mostRecentIP, nil -} - -func (d *Driver) diskPath() string { - absDiskPath, _ := filepath.Abs(d.ResolveStorePath("disk.hdd")) - return absDiskPath -} - -func (d *Driver) mountShareFolder(shareName string, mountPoint string) error { - // Check the host path is available - if _, err := os.Stat(mountPoint); err != nil { - if os.IsNotExist(err) { - log.Infof("Host path '%s' does not exist. Skipping mount to VM...", mountPoint) - return nil - } - return err - } - - // Ensure that share is available on the guest side - checkCmd := "sudo modprobe prl_fs && grep -w " + shareName + " /proc/fs/prl_fs/sf_list" - if _, err := drivers.RunSSHCommandFromDriver(d, checkCmd); err != nil { - log.Infof("Shared folder '%s' is unavailable. Skipping mount to VM...", shareName) - return nil - } - - // Mount shared folder - mountCmd := "sudo mkdir -p " + mountPoint + " && sudo mount -t prl_fs " + shareName + " " + mountPoint - if _, err := drivers.RunSSHCommandFromDriver(d, mountCmd); err != nil { - return fmt.Errorf("Error mounting shared folder: %s", err) - } - - return nil -} - -// Make a boot2docker VM disk image. 
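A standalone sketch, not part of the patch, of the lease parsing done by getIPfromDHCPLease above; the sample lease line and MAC are hypothetical but follow the shape the regexp expects (ip="expiry,leaseTime,mac,...").

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	mac := "001c42abcdef" // hypothetical lower-cased 12-digit MAC
	leases := `10.211.55.3="1539613529,1800,001c42abcdef,01001c42abcdef"`

	// Same expression shape as getIPfromDHCPLease builds above.
	ipRe := regexp.MustCompile(`(.*)="(.*),(.*),` + mac + `,.*"`)
	for _, l := range ipRe.FindAllStringSubmatch(leases, -1) {
		ip := l[1]
		expiry, _ := strconv.ParseUint(l[2], 10, 64)
		leaseTime, _ := strconv.ParseUint(l[3], 10, 32)
		// The driver keeps the entry with the most recent lease start (expiry - leaseTime).
		fmt.Printf("ip=%s lease started at %d\n", ip, expiry-leaseTime)
	}
}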
-func (d *Driver) generateDiskImage(size int) error { - tarBuf, err := mcnutils.MakeDiskImage(d.publicSSHKeyPath()) - if err != nil { - return err - } - - minSizeBytes := int64(minDiskSize) << 20 // usually won't fit in 32-bit int (max 2GB) - - //Expand the initial image if needed - if bufLen := int64(tarBuf.Len()); bufLen > minSizeBytes { - bufLenMBytes := bufLen>>20 + 1 - if err = prldisktool("resize", - "--hdd", d.diskPath(), - "--size", fmt.Sprintf("%d", bufLenMBytes)); err != nil { - return err - } - } - - // Find hds file - hdsList, err := filepath.Glob(d.diskPath() + "/*.hds") - if err != nil { - return err - } - if len(hdsList) == 0 { - return fmt.Errorf("Could not find *.hds image in %s", d.diskPath()) - } - hdsPath := hdsList[0] - log.Debugf("HDS image path: %s", hdsPath) - - // Write tar to the hds file - hds, err := os.OpenFile(hdsPath, os.O_WRONLY, 0644) - if err != nil { - return err - } - defer hds.Close() - hds.Seek(0, os.SEEK_SET) - _, err = hds.Write(tarBuf.Bytes()) - if err != nil { - return err - } - hds.Close() - - // Convert image to expanding type and resize it - if err := prldisktool("convert", "--expanding", "--merge", - "--hdd", d.diskPath()); err != nil { - return err - } - - if err := prldisktool("resize", - "--hdd", d.diskPath(), - "--size", fmt.Sprintf("%d", size)); err != nil { - return err - } - - return nil -} - -func (d *Driver) publicSSHKeyPath() string { - return d.GetSSHKeyPath() + ".pub" -} - -func detectCmdInPath(cmd string) string { - if path, err := exec.LookPath(cmd); err == nil { - return path - } - return cmd -} - -// Detects Parallels Desktop major version -func getParallelsVersion() (*version.Version, error) { - stdout, _, err := prlctlOutErr("--version") - if err != nil { - return nil, err - } - - // Parse Parallels Desktop version - verRaw := reParallelsVersion.FindStringSubmatch(string(stdout)) - if verRaw == nil { - return nil, fmt.Errorf("Parallels Desktop version could not be fetched: %s", stdout) - } - - ver, err := version.NewVersion(verRaw[1]) - if err != nil { - return nil, err - } - - return ver, nil -} - -// Detects Parallels Desktop edition -func getParallelsEdition() (string, error) { - stdout, _, err := prlsrvctlOutErr("info", "--license") - if err != nil { - return "", err - } - - // Parse Parallels Desktop version - res := reParallelsEdition.FindStringSubmatch(string(stdout)) - if res == nil { - return "", fmt.Errorf("Parallels Desktop edition could not be fetched!") - } - - return res[1], nil -} - -// Checks whether the host is connected to Shared network -func isSharedConnected() (bool, error) { - stdout, _, err := prlsrvctlOutErr("net", "info", "Shared") - if err != nil { - return false, err - } - - reSharedIsConnected := regexp.MustCompile(`Bound To:.*`) - return reSharedIsConnected.MatchString(stdout), nil -} diff --git a/vendor/github.com/Parallels/docker-machine-parallels/prlctl.go b/vendor/github.com/Parallels/docker-machine-parallels/prlctl.go deleted file mode 100644 index d88a8a5b5185..000000000000 --- a/vendor/github.com/Parallels/docker-machine-parallels/prlctl.go +++ /dev/null @@ -1,65 +0,0 @@ -package parallels - -import ( - "bytes" - "errors" - "os" - "os/exec" - "strings" - - "github.com/docker/machine/libmachine/log" -) - -var ( - prlctlCmd = detectCmdInPath("prlctl") - prlsrvctlCmd = detectCmdInPath("prlsrvctl") - prldisktoolCmd = detectCmdInPath("prl_disk_tool") - - errPrlctlNotFound = errors.New("Could not detect `prlctl` binary! 
Make sure Parallels Desktop Pro or Business edition is installed") - errPrlsrvctlNotFound = errors.New("Could not detect `prlsrvctl` binary! Make sure Parallels Desktop Pro or Business edition is installed") - errPrldisktoolNotFound = errors.New("Could not detect `prl_disk_tool` binary! Make sure Parallels Desktop Pro or Business edition is installed") -) - -func runCmd(cmdName string, args []string, notFound error) (string, string, error) { - cmd := exec.Command(cmdName, args...) - if os.Getenv("MACHINE_DEBUG") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout, cmd.Stderr = &stdout, &stderr - log.Debugf("executing: %v %v", cmdName, strings.Join(args, " ")) - - err := cmd.Run() - if err != nil { - if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - err = notFound - } - } - return stdout.String(), stderr.String(), err -} - -func prlctl(args ...string) error { - _, _, err := runCmd(prlctlCmd, args, errPrlctlNotFound) - return err -} - -func prlctlOutErr(args ...string) (string, string, error) { - return runCmd(prlctlCmd, args, errPrlctlNotFound) -} - -func prlsrvctl(args ...string) error { - _, _, err := runCmd(prlsrvctlCmd, args, errPrlsrvctlNotFound) - return err -} - -func prlsrvctlOutErr(args ...string) (string, string, error) { - return runCmd(prlsrvctlCmd, args, errPrlsrvctlNotFound) -} - -func prldisktool(args ...string) error { - _, _, err := runCmd(prldisktoolCmd, args, errPrldisktoolNotFound) - return err -} diff --git a/vendor/github.com/Parallels/docker-machine-parallels/version.go b/vendor/github.com/Parallels/docker-machine-parallels/version.go deleted file mode 100644 index c061a1451719..000000000000 --- a/vendor/github.com/Parallels/docker-machine-parallels/version.go +++ /dev/null @@ -1,14 +0,0 @@ -package parallels - -import "fmt" - -// GitCommit that was compiled. This will be filled in by the compiler. -var GitCommit string - -// Version number that is being run at the moment. -const Version = "1.3.0" - -// FullVersion formats the version to be printed. -func FullVersion() string { - return fmt.Sprintf("%s, build %s", Version, GitCommit) -} diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86fcb02..000000000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
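The prlctl.go file removed above wraps each Parallels CLI binary (prlctl, prlsrvctl, prl_disk_tool) in one runCmd helper that buffers stdout and stderr separately and maps exec.ErrNotFound to a descriptive "binary not detected" error. Below is a minimal, self-contained sketch of that same wrapper pattern, assuming a hypothetical `mytool` binary; the binary name and error text are placeholders for illustration and are not part of the deleted driver.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"os/exec"
	"strings"
)

// errToolNotFound mirrors the per-binary "could not detect" errors in the
// deleted prlctl.go; "mytool" is a placeholder name.
var errToolNotFound = errors.New("could not detect `mytool` binary in PATH")

// runCLI runs the given binary, capturing stdout and stderr in separate
// buffers, and converts exec.ErrNotFound into a friendlier error, following
// the shape of the removed runCmd helper.
func runCLI(bin string, args ...string) (string, string, error) {
	cmd := exec.Command(bin, args...)

	var stdout, stderr bytes.Buffer
	cmd.Stdout, cmd.Stderr = &stdout, &stderr

	err := cmd.Run()
	if ee, ok := err.(*exec.Error); ok && errors.Is(ee.Err, exec.ErrNotFound) {
		err = errToolNotFound
	}
	return stdout.String(), stderr.String(), err
}

func main() {
	// "mytool" is illustrative; substitute any CLI actually installed.
	out, errOut, err := runCLI("mytool", "--version")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("stdout:", strings.TrimSpace(out))
	fmt.Println("stderr:", strings.TrimSpace(errOut))
}

Keeping stdout and stderr in separate buffers is what lets callers such as getParallelsVersion and getParallelsEdition parse version or license output with a regexp while still surfacing stderr in error messages.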
- diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c44940..000000000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d47939..000000000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. -// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. 
It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. -func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. 
-func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - 
resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842e6ac7..000000000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) - } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. 
-func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). 
It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880826ab..000000000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802666e0..000000000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/c4milo/gotoolkit/LICENSE b/vendor/github.com/c4milo/gotoolkit/LICENSE deleted file mode 100644 index 52d135112eeb..000000000000 --- a/vendor/github.com/c4milo/gotoolkit/LICENSE +++ /dev/null @@ -1,374 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. 
- -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. 
Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/c4milo/gotoolkit/queue.go b/vendor/github.com/c4milo/gotoolkit/queue.go deleted file mode 100644 index 6caf125895ff..000000000000 --- a/vendor/github.com/c4milo/gotoolkit/queue.go +++ /dev/null @@ -1,114 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at http://mozilla.org/MPL/2.0/. - -package gotoolkit - -import "errors" - -// Queue defines an interface for implementing Queue data structures. -type Queue interface { - Enqueue(item interface{}) - Dequeue() (interface{}, error) - Peek() (interface{}, error) - IsEmpty() bool - Size() uint64 -} - -// ListQueue implements a queue backed by a linked list, it may be faster than SliceQueue -// but consumes more memory and has worse cache locality than SliceQueue -type ListQueue struct { - first *node - last *node - size uint64 -} - -// Enqueue adds an element to the end of the queue. -func (q *ListQueue) Enqueue(item interface{}) { - last := new(node) - last.item = item - - if q.IsEmpty() { - q.first = last - } else { - oldLast := q.last - oldLast.next = last - } - q.last = last - q.size++ -} - -// Dequeue removes the first element from the queue. -func (q *ListQueue) Dequeue() (interface{}, error) { - if q.IsEmpty() { - q.last = nil - return nil, errors.New("unable to dequeue element, queue is empty") - } - - item := q.first.item - q.first = q.first.next - q.size-- - return item, nil -} - -// Peek returns the first element in the queue without removing it. -func (q *ListQueue) Peek() (interface{}, error) { - if q.IsEmpty() { - return nil, errors.New("unable to peek element, queue is empty") - } - return q.first.item, nil -} - -// IsEmpty returns whether the queue is empty or not. -func (q *ListQueue) IsEmpty() bool { - return q.first == nil -} - -// Size returns the number of elements in the queue. -func (q *ListQueue) Size() uint64 { - return q.size -} - -// SliceQueue implements a queue backed by a growing slice. Useful for memory -// constrained environments. It also has better cache locality than ListQueue -type SliceQueue struct { - size uint64 - backing []interface{} -} - -// Enqueue adds an element to the end of the queue. -func (s *SliceQueue) Enqueue(item interface{}) { - s.size++ - s.backing = append(s.backing, item) -} - -// Peek returns the first element of the queue without removing it from the queue. -func (s *SliceQueue) Peek() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to peek element, queue is empty") - } - return s.backing[0], nil -} - -// Dequeue removes and return the first element from the queue. -func (s *SliceQueue) Dequeue() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to dequeue element, queue is empty") - } - - item := s.backing[0] - // https://github.com/golang/go/wiki/SliceTricks - s.backing = append(s.backing[:0], s.backing[1:]...) 
- s.size-- - return item, nil -} - -// IsEmpty returns whether or not the queue is empty. -func (s *SliceQueue) IsEmpty() bool { - return len(s.backing) == 0 -} - -// Size returns the number of elements in the queue. -func (s *SliceQueue) Size() uint64 { - return s.size -} diff --git a/vendor/github.com/c4milo/gotoolkit/stack.go b/vendor/github.com/c4milo/gotoolkit/stack.go deleted file mode 100644 index 8314cb370cbd..000000000000 --- a/vendor/github.com/c4milo/gotoolkit/stack.go +++ /dev/null @@ -1,110 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at http://mozilla.org/MPL/2.0/. - -package gotoolkit - -import "errors" - -// Stack defines an interface for implementing Stack data structures. -type Stack interface { - Push(item interface{}) - Pop() (interface{}, error) - Peek() (interface{}, error) - IsEmpty() bool - Size() uint64 -} - -type node struct { - item interface{} - next *node -} - -// ListStack is a stack backed by a linked list, it may be faster than SliceStack -// but consumes more memory and has worse cache locality than SliceQueue. -type ListStack struct { - first *node - size uint64 -} - -// Push adds an element to the top of the stack. -func (s *ListStack) Push(item interface{}) { - oldFirst := s.first - s.first = new(node) - s.first.item = item - s.first.next = oldFirst - s.size++ -} - -// Peek returns the latest added element without removing it from the stack. -func (s *ListStack) Peek() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to peek element, stack is empty") - } - - return s.first.item, nil -} - -// Pop removes and return the latest added element from the stack. -func (s *ListStack) Pop() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to pop element, stack is empty") - } - item := s.first.item - s.first = s.first.next - s.size-- - return item, nil -} - -// IsEmpty returns whether or not the stack is empty. -func (s *ListStack) IsEmpty() bool { - return s.first == nil -} - -// Size returns the number of elements in the stack. -func (s *ListStack) Size() uint64 { - return s.size -} - -// SliceStack implements a stack backed by a growing slice. Useful for memory -// constrained environments. It also has better cache locality. -type SliceStack struct { - size uint64 - backing []interface{} -} - -// Push adds an element to the top of the stack. -func (s *SliceStack) Push(item interface{}) { - s.size++ - s.backing = append(s.backing, item) -} - -// Peek returns the latest added element without removing it from the stack. -func (s *SliceStack) Peek() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to peek element, stack is empty") - } - return s.backing[s.size-1], nil -} - -// Pop removes and return the latest added element from the stack. -func (s *SliceStack) Pop() (interface{}, error) { - if s.IsEmpty() { - return nil, errors.New("unable to pop element, stack is empty") - } - - s.size-- - item := s.backing[s.size] - s.backing = s.backing[0:s.size] - return item, nil -} - -// IsEmpty returns whether or not the stack is empty. -func (s *SliceStack) IsEmpty() bool { - return len(s.backing) == 0 -} - -// Size returns the number of elements in the stack. 
-func (s *SliceStack) Size() uint64 { - return s.size -} diff --git a/vendor/github.com/cloudfoundry-attic/jibber_jabber/LICENSE b/vendor/github.com/cloudfoundry-attic/jibber_jabber/LICENSE deleted file mode 100644 index 915b208920b4..000000000000 --- a/vendor/github.com/cloudfoundry-attic/jibber_jabber/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2014 Pivotal - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber.go b/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber.go deleted file mode 100644 index 45d288ea87a4..000000000000 --- a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber.go +++ /dev/null @@ -1,22 +0,0 @@ -package jibber_jabber - -import ( - "strings" -) - -const ( - COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE = "Could not detect Language" -) - -func splitLocale(locale string) (string, string) { - formattedLocale := strings.Split(locale, ".")[0] - formattedLocale = strings.Replace(formattedLocale, "-", "_", -1) - - pieces := strings.Split(formattedLocale, "_") - language := pieces[0] - territory := "" - if len(pieces) > 1 { - territory = strings.Split(formattedLocale, "_")[1] - } - return language, territory -} diff --git a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_unix.go b/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_unix.go deleted file mode 100644 index 374d7617630a..000000000000 --- a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build darwin freebsd linux netbsd openbsd - -package jibber_jabber - -import ( - "errors" - "os" - "strings" -) - -func getLangFromEnv() (locale string) { - locale = os.Getenv("LC_ALL") - if locale == "" { - locale = os.Getenv("LANG") - } - return -} - -func getUnixLocale() (unix_locale string, err error) { - unix_locale = getLangFromEnv() - if unix_locale == "" { - err = errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE) - } - - return -} - -func DetectIETF() (locale string, err error) { - unix_locale, err := getUnixLocale() - if err == nil { - language, territory := splitLocale(unix_locale) - locale = language - if territory != "" { - locale = strings.Join([]string{language, territory}, "-") - } - } - - return -} - -func DetectLanguage() (language string, err error) { - unix_locale, err := getUnixLocale() - if err == nil { - language, _ = splitLocale(unix_locale) - } - - return -} - -func DetectTerritory() (territory string, err error) { - unix_locale, err := getUnixLocale() - if err == nil { - _, territory = splitLocale(unix_locale) - } - - return -} diff --git a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_windows.go b/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_windows.go deleted file mode 100644 index 1acd96c38b12..000000000000 --- a/vendor/github.com/cloudfoundry-attic/jibber_jabber/jibber_jabber_windows.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build windows - -package jibber_jabber - -import ( - "errors" - "syscall" - "unsafe" -) - -const LOCALE_NAME_MAX_LENGTH uint32 = 85 - -var SUPPORTED_LOCALES = map[uintptr]string{ - 0x0407: "de-DE", - 0x0409: "en-US", - 0x0c0a: 
"es-ES", //or is it 0x040a - 0x040c: "fr-FR", - 0x0410: "it-IT", - 0x0411: "ja-JA", - 0x0412: "ko_KR", - 0x0416: "pt-BR", - //0x0419: "ru_RU", - Will add support for Russian when nicksnyder/go-i18n supports Russian - 0x0804: "zh-CN", - 0x0c04: "zh-HK", - 0x0404: "zh-TW", -} - -func getWindowsLocaleFrom(sysCall string) (locale string, err error) { - buffer := make([]uint16, LOCALE_NAME_MAX_LENGTH) - - dll := syscall.MustLoadDLL("kernel32") - proc := dll.MustFindProc(sysCall) - r, _, dllError := proc.Call(uintptr(unsafe.Pointer(&buffer[0])), uintptr(LOCALE_NAME_MAX_LENGTH)) - if r == 0 { - err = errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE + ":\n" + dllError.Error()) - return - } - - locale = syscall.UTF16ToString(buffer) - - return -} - -func getAllWindowsLocaleFrom(sysCall string) (string, error) { - dll, err := syscall.LoadDLL("kernel32") - if err != nil { - return "", errors.New("Could not find kernel32 dll") - } - - proc, err := dll.FindProc(sysCall) - if err != nil { - return "", err - } - - locale, _, dllError := proc.Call() - if locale == 0 { - return "", errors.New(COULD_NOT_DETECT_PACKAGE_ERROR_MESSAGE + ":\n" + dllError.Error()) - } - - return SUPPORTED_LOCALES[locale], nil -} - -func getWindowsLocale() (locale string, err error) { - dll, err := syscall.LoadDLL("kernel32") - if err != nil { - return "", errors.New("Could not find kernel32 dll") - } - - proc, err := dll.FindProc("GetVersion") - if err != nil { - return "", err - } - - v, _, _ := proc.Call() - windowsVersion := byte(v) - isVistaOrGreater := (windowsVersion >= 6) - - if isVistaOrGreater { - locale, err = getWindowsLocaleFrom("GetUserDefaultLocaleName") - if err != nil { - locale, err = getWindowsLocaleFrom("GetSystemDefaultLocaleName") - } - } else if !isVistaOrGreater { - locale, err = getAllWindowsLocaleFrom("GetUserDefaultLCID") - if err != nil { - locale, err = getAllWindowsLocaleFrom("GetSystemDefaultLCID") - } - } else { - panic(v) - } - return -} -func DetectIETF() (locale string, err error) { - locale, err = getWindowsLocale() - return -} - -func DetectLanguage() (language string, err error) { - windows_locale, err := getWindowsLocale() - if err == nil { - language, _ = splitLocale(windows_locale) - } - - return -} - -func DetectTerritory() (territory string, err error) { - windows_locale, err := getWindowsLocale() - if err == nil { - _, territory = splitLocale(windows_locale) - } - - return -} diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md deleted file mode 100644 index 1cade6cef6a1..000000000000 --- a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go deleted file mode 100644 index 8f44fa155064..000000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go +++ /dev/null @@ -1,19 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday" -) - -func Render(doc []byte) []byte { - renderer := RoffRenderer(0) - extensions := 0 - extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS - extensions |= blackfriday.EXTENSION_TABLES - extensions |= blackfriday.EXTENSION_FENCED_CODE - extensions |= blackfriday.EXTENSION_AUTOLINK - extensions |= blackfriday.EXTENSION_SPACE_HEADERS - extensions |= blackfriday.EXTENSION_FOOTNOTES - extensions |= blackfriday.EXTENSION_TITLEBLOCK - - return blackfriday.Markdown(doc, renderer, extensions) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go deleted file mode 100644 index 4478786b7b6d..000000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go +++ /dev/null @@ -1,269 +0,0 @@ -package md2man - -import ( - "bytes" - "fmt" - "html" - "strings" - - "github.com/russross/blackfriday" -) - -type roffRenderer struct{} - -func RoffRenderer(flags int) blackfriday.Renderer { - return &roffRenderer{} -} - -func (r *roffRenderer) GetFlags() int { - return 0 -} - -func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { - out.WriteString(".TH ") - - splitText := bytes.Split(text, []byte("\n")) - for i, line := range splitText { - line = bytes.TrimPrefix(line, []byte("% ")) - if i == 0 { - line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) - line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) - } - line = append([]byte("\""), line...) - line = append(line, []byte("\" ")...) 
- out.Write(line) - } - - out.WriteString(" \"\"\n") -} - -func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { - out.WriteString("\n.PP\n.RS\n\n.nf\n") - escapeSpecialChars(out, text) - out.WriteString("\n.fi\n.RE\n") -} - -func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { - out.WriteString("\n.PP\n.RS\n") - out.Write(text) - out.WriteString("\n.RE\n") -} - -func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { - marker := out.Len() - - switch { - case marker == 0: - // This is the doc header - out.WriteString(".TH ") - case level == 1: - out.WriteString("\n\n.SH ") - case level == 2: - out.WriteString("\n.SH ") - default: - out.WriteString("\n.SS ") - } - - if !text() { - out.Truncate(marker) - return - } -} - -func (r *roffRenderer) HRule(out *bytes.Buffer) { - out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") -} - -func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { - marker := out.Len() - out.WriteString(".IP ") - if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString("\\(bu 2") - } else { - out.WriteString("\\n+[step" + string(flags) + "]") - } - out.WriteString("\n") - if !text() { - out.Truncate(marker) - return - } - -} - -func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { - out.WriteString("\n\\item ") - out.Write(text) -} - -func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { - marker := out.Len() - out.WriteString("\n.PP\n") - if !text() { - out.Truncate(marker) - return - } - if marker != 0 { - out.WriteString("\n") - } -} - -// TODO: This might now work -func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - out.WriteString(".TS\nallbox;\n") - - out.Write(header) - out.Write(body) - out.WriteString("\n.TE\n") -} - -func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { - if out.Len() > 0 { - out.WriteString("\n") - } - out.Write(text) - out.WriteString("\n") -} - -func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString(" ") - } - out.Write(text) - out.WriteString(" ") -} - -// TODO: This is probably broken -func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString("\t") - } - out.Write(text) - out.WriteString("\t") -} - -func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { - -} - -func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { - -} - -func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { - out.WriteString("\n\\[la]") - out.Write(link) - out.WriteString("\\[ra]") -} - -func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB\\fC") - escapeSpecialChars(out, text) - out.WriteString("\\fR") -} - -func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fI") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { -} - -func (r *roffRenderer) LineBreak(out *bytes.Buffer) { - out.WriteString("\n.br\n") -} - -func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) 
{ - r.AutoLink(out, link, 0) -} - -func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { - out.Write(tag) -} - -func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\s+2") - out.Write(text) - out.WriteString("\\s-2") -} - -func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { -} - -func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { - -} - -func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { - out.WriteString(html.UnescapeString(string(entity))) -} - -func processFooterText(text []byte) []byte { - text = bytes.TrimPrefix(text, []byte("% ")) - newText := []byte{} - textArr := strings.Split(string(text), ") ") - - for i, w := range textArr { - if i == 0 { - w = strings.Replace(w, "(", "\" \"", 1) - w = fmt.Sprintf("\"%s\"", w) - } else { - w = fmt.Sprintf(" \"%s\"", w) - } - newText = append(newText, []byte(w)...) - } - newText = append(newText, []byte(" \"\"")...) - - return newText -} - -func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { - escapeSpecialChars(out, text) -} - -func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { -} - -func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(out *bytes.Buffer, text []byte) { - for i := 0; i < len(text); i++ { - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - out.Write(text[org:i]) - } - - // escape a character - if i >= len(text) { - break - } - out.WriteByte('\\') - out.WriteByte(text[i]) - } -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index c836416192da..000000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 8a4a6589a2d4..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 1fe3cf3d5d10..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. 
-// +build js appengine safe disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce945761..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. 
-// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. 
- base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. 
- l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f31202..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. 
- DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. 
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1e1e..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. 
- - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index df1d582a728a..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. 
- if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. 
- if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index c49875bacbb8..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. 
- if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e3388253..000000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index c6c8fb40e398..000000000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2016 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron L. Xu -Aaron Lehmann -Aaron Welch -Aaron.L.Xu -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Adam Avilla -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Alejandro González Hevia -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -Arash Deshmeh -ArikaChen -Arnaud Lefebvre -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bin Liu -Bingshen Wang -Blake Geno -Boaz Shuster -bobby abbott -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengguang Xu -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris McKinnel -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Corbin Coleman -Corey Farrell -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Williams -Dani Louca -Daniel Antlinger -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Danny Berger -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. Karr -David Mackey -David Mat -David Mcanulty -David McKay -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik St. 
Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Ethan Bell -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -gautam, prasanna -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -haikuoliu -Hakan Özler -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. 
Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Zhang -Jie Luo -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Galasyn -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -jimmyxian -Jinsoo Park -Jiri Popelka -Jiuyue Ma -Jiří Župka -jjy -jmzwcn -Joao Fernandes -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard (VM) -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Pfenniger -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -K. 
Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Kun Zhang -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Gong -Lei Jitang -Len Weincier -Lennie -Leo Gallucci -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Lily Guo -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias 
Kühnle -Matthias Rampke -Matthieu Hauglustaine -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minář -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Phil -Phil Estes -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Pure White -pysqz -Qiang Huang -Qinglan Peng -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Stern -Robert Terhaar -Robert Wallis -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Strashkin -Ron Smits -Ron Williams -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Lopes -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. 
Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simei He -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Stanislav Bondarenko -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephan Spindler -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Tabakhase -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Sweeney -Tom Wilkie -Tom X. Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Tõnis Tiigi -tpng -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -vanderliang -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. 
Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vlastimil Zeman -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Ward Vandewege -WarheadsSE -Wassim Dhif -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -Wentao Zhang -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -WiseTrem -Wolfgang Powisch -Wonjun Kim -xamyzhao -Xianglin Gao -Xianlu Bird -XiaoBing Jiang -Xiaoxu Chen -Xiaoyu Zhang -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yang Bai -Yang Pengfei -yangchenliang -Yanqiang Miao -Yao Zaiyong -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang -Yurii Rashkovskii -Yves Junqueira -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenkun Bi -Zhou Hao -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb676b..000000000000 --- a/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 0c74e15b057f..000000000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE deleted file mode 100644 index e67cdabd22e5..000000000000 --- a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 Honza Pokorny -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png deleted file mode 100644 index 63c0a0c09b58..000000000000 Binary files a/vendor/github.com/docker/docker/docs/static_files/contributors.png and /dev/null differ diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh deleted file mode 100755 index 680bdb7b3f74..000000000000 --- a/vendor/github.com/docker/docker/hack/generate-authors.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." - -# see also ".mailmap" for how email addresses and names are deduplicated - -{ - cat <<-'EOH' - # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. 
- EOH - echo - git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem deleted file mode 120000 index 70a3e6ce54d6..000000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem +++ /dev/null @@ -1 +0,0 @@ -../../../integration/testdata/https/ca.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem deleted file mode 120000 index 458882026e45..000000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem +++ /dev/null @@ -1 +0,0 @@ -../../../integration/testdata/https/client-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem deleted file mode 120000 index d5f6bbee571d..000000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem +++ /dev/null @@ -1 +0,0 @@ -../../../integration/testdata/https/client-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem deleted file mode 120000 index c18601067aa7..000000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem +++ /dev/null @@ -1 +0,0 @@ -../../../integration/testdata/https/server-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem deleted file mode 120000 index 48b9c2df6586..000000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem +++ /dev/null @@ -1 +0,0 @@ -../../../integration/testdata/https/server-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 5d80670bc0f8..000000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD deleted file mode 100644 index 2ee8768d3156..000000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go deleted file mode 100644 index 87bca8d4acdb..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go deleted file mode 100644 index da733e58484c..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/proxy.go +++ /dev/null @@ -1,78 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. 
-func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (int, error) { - nr, err := r.r.Read(buf) - - if len(r.escapeKeys) == 0 { - return nr, err - } - - preserve := func() { - // this preserves the original key presses in the passed in buffer - nr += r.escapeKeyPos - preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf...) - r.escapeKeyPos = 0 - copy(buf[0:nr], preserve) - } - - if nr != 1 || err != nil { - if r.escapeKeyPos > 0 { - preserve() - } - return nr, err - } - - if buf[0] != r.escapeKeys[r.escapeKeyPos] { - if r.escapeKeyPos > 0 { - preserve() - } - return nr, nil - } - - if r.escapeKeyPos == len(r.escapeKeys)-1 { - return 0, EscapeError{} - } - - // Looks like we've got an escape key, but we need to match again on the next - // read. - // Store the current escape key we found so we can look for the next one on - // the next read. - // Since this is an escape key, make sure we don't let the caller read it - // If later on we find that this is not the escape sequence, we'll add the - // keys back - r.escapeKeyPos++ - return nr - r.escapeKeyPos, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go deleted file mode 100644 index 01bcaa8abb18..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index 0589a955194b..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term // import "github.com/docker/docker/pkg/term" - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index a3c3db131574..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,221 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "io" - "os" - "os/signal" - "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. 
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and - // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as - // go-ansiterm hasn't switch to x/sys/windows. - // TODO: switch back to x/sys/windows once go-ansiterm has switched - if emulateStdin { - stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windowsconsole.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. 
-// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go deleted file mode 100644 index 48b16f52039c..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build darwin freebsd openbsd netbsd - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - newState.Oflag &^= unix.OPOST - newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - newState.Cflag &^= (unix.CSIZE | unix.PARENB) - newState.Cflag |= unix.CS8 - newState.Cc[unix.VMIN] = 1 - newState.Cc[unix.VTIME] = 0 - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 6d4c63fdb75e..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,39 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - termios, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - - var oldState State - oldState.termios = Termios(*termios) - - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 1d7c452cc845..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. 
-func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 7799a03fc59e..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index 527401975805..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index 3e5593ca6a68..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "io/ioutil" - "os" - "sync" - - "github.com/Azure/go-ansiterm" - "github.com/sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go deleted file mode 100644 index a19663ad834b..000000000000 --- a/vendor/github.com/docker/docker/pkg/term/winsize.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. 
-func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTING.md b/vendor/github.com/docker/docker/project/CONTRIBUTING.md deleted file mode 120000 index 44fcc6343937..000000000000 --- a/vendor/github.com/docker/docker/project/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3162..000000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index ba02af26dc5c..000000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 46 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index 44616c271877..000000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. 
-func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index 5ac7fd825fce..000000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,118 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/docker/machine/LICENSE b/vendor/github.com/docker/machine/LICENSE deleted file mode 100644 index 27448585ad49..000000000000 --- a/vendor/github.com/docker/machine/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/machine/commands/mcndirs/utils.go b/vendor/github.com/docker/machine/commands/mcndirs/utils.go deleted file mode 100644 index 3181e7c86c4a..000000000000 --- a/vendor/github.com/docker/machine/commands/mcndirs/utils.go +++ /dev/null @@ -1,27 +0,0 @@ -package mcndirs - -import ( - "os" - "path/filepath" - - "github.com/docker/machine/libmachine/mcnutils" -) - -var ( - BaseDir = os.Getenv("MACHINE_STORAGE_PATH") -) - -func GetBaseDir() string { - if BaseDir == "" { - BaseDir = filepath.Join(mcnutils.GetHomeDir(), ".docker", "machine") - } - return BaseDir -} - -func GetMachineDir() string { - return filepath.Join(GetBaseDir(), "machines") -} - -func GetMachineCertDir() string { - return filepath.Join(GetBaseDir(), "certs") -} diff --git a/vendor/github.com/docker/machine/drivers/errdriver/error.go b/vendor/github.com/docker/machine/drivers/errdriver/error.go deleted file mode 100644 index 532203771bea..000000000000 --- a/vendor/github.com/docker/machine/drivers/errdriver/error.go +++ /dev/null @@ -1,104 +0,0 @@ -package errdriver - -import ( - "fmt" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" -) - -type Driver struct { - Name string -} - -type NotLoadable struct { - Name string -} - -func (e NotLoadable) Error() string { - return fmt.Sprintf("Driver %q not found. 
Do you have the plugin binary accessible in your PATH?", e.Name) -} - -func NewDriver(Name string) drivers.Driver { - return &Driver{ - Name: Name, - } -} - -// DriverName returns the name of the driver -func (d *Driver) DriverName() string { - return "not-found" -} - -func (d *Driver) PreCreateCheck() error { - return NotLoadable{d.Name} -} - -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return nil -} - -func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { - return NotLoadable{d.Name} -} - -func (d *Driver) GetURL() (string, error) { - return "", NotLoadable{d.Name} -} - -func (d *Driver) GetMachineName() string { - return d.Name -} - -func (d *Driver) GetIP() (string, error) { - return "1.2.3.4", NotLoadable{d.Name} -} - -func (d *Driver) GetSSHHostname() (string, error) { - return "", NotLoadable{d.Name} -} - -func (d *Driver) GetSSHKeyPath() string { - return "" -} - -func (d *Driver) GetSSHPort() (int, error) { - return 0, NotLoadable{d.Name} -} - -func (d *Driver) GetSSHUsername() string { - return "" -} - -func (d *Driver) GetState() (state.State, error) { - return state.Error, NotLoadable{d.Name} -} - -func (d *Driver) Create() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Remove() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Start() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Stop() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Restart() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Kill() error { - return NotLoadable{d.Name} -} - -func (d *Driver) Upgrade() error { - return NotLoadable{d.Name} -} diff --git a/vendor/github.com/docker/machine/drivers/hyperv/hyperv.go b/vendor/github.com/docker/machine/drivers/hyperv/hyperv.go deleted file mode 100644 index a980cf11f4df..000000000000 --- a/vendor/github.com/docker/machine/drivers/hyperv/hyperv.go +++ /dev/null @@ -1,492 +0,0 @@ -package hyperv - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" -) - -type Driver struct { - *drivers.BaseDriver - Boot2DockerURL string - VSwitch string - DiskSize int - MemSize int - CPU int - MacAddr string - VLanID int - DisableDynamicMemory bool -} - -const ( - defaultDiskSize = 20000 - defaultMemory = 1024 - defaultCPU = 1 - defaultVLanID = 0 - defaultDisableDynamicMemory = false -) - -// NewDriver creates a new Hyper-v driver with default settings. -func NewDriver(hostName, storePath string) *Driver { - return &Driver{ - DiskSize: defaultDiskSize, - MemSize: defaultMemory, - CPU: defaultCPU, - DisableDynamicMemory: defaultDisableDynamicMemory, - BaseDriver: &drivers.BaseDriver{ - MachineName: hostName, - StorePath: storePath, - }, - } -} - -// GetCreateFlags registers the flags this driver adds to -// "docker hosts create" -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return []mcnflag.Flag{ - mcnflag.StringFlag{ - Name: "hyperv-boot2docker-url", - Usage: "URL of the boot2docker ISO. Defaults to the latest available version.", - EnvVar: "HYPERV_BOOT2DOCKER_URL", - }, - mcnflag.StringFlag{ - Name: "hyperv-virtual-switch", - Usage: "Virtual switch name. 
Defaults to first found.", - EnvVar: "HYPERV_VIRTUAL_SWITCH", - }, - mcnflag.IntFlag{ - Name: "hyperv-disk-size", - Usage: "Maximum size of dynamically expanding disk in MB.", - Value: defaultDiskSize, - EnvVar: "HYPERV_DISK_SIZE", - }, - mcnflag.IntFlag{ - Name: "hyperv-memory", - Usage: "Memory size for host in MB.", - Value: defaultMemory, - EnvVar: "HYPERV_MEMORY", - }, - mcnflag.IntFlag{ - Name: "hyperv-cpu-count", - Usage: "number of CPUs for the machine", - Value: defaultCPU, - EnvVar: "HYPERV_CPU_COUNT", - }, - mcnflag.StringFlag{ - Name: "hyperv-static-macaddress", - Usage: "Hyper-V network adapter's static MAC address.", - EnvVar: "HYPERV_STATIC_MACADDRESS", - }, - mcnflag.IntFlag{ - Name: "hyperv-vlan-id", - Usage: "Hyper-V network adapter's VLAN ID if any", - Value: defaultVLanID, - EnvVar: "HYPERV_VLAN_ID", - }, - mcnflag.BoolFlag{ - Name: "hyperv-disable-dynamic-memory", - Usage: "Disable dynamic memory management setting", - EnvVar: "HYPERV_DISABLE_DYNAMIC_MEMORY", - }, - } -} - -func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { - d.Boot2DockerURL = flags.String("hyperv-boot2docker-url") - d.VSwitch = flags.String("hyperv-virtual-switch") - d.DiskSize = flags.Int("hyperv-disk-size") - d.MemSize = flags.Int("hyperv-memory") - d.CPU = flags.Int("hyperv-cpu-count") - d.MacAddr = flags.String("hyperv-static-macaddress") - d.VLanID = flags.Int("hyperv-vlan-id") - d.SSHUser = "docker" - d.DisableDynamicMemory = flags.Bool("hyperv-disable-dynamic-memory") - d.SetSwarmConfigFromFlags(flags) - - return nil -} - -func (d *Driver) GetSSHHostname() (string, error) { - return d.GetIP() -} - -// DriverName returns the name of the driver -func (d *Driver) DriverName() string { - return "hyperv" -} - -func (d *Driver) GetURL() (string, error) { - ip, err := d.GetIP() - if err != nil { - return "", err - } - - if ip == "" { - return "", nil - } - - return fmt.Sprintf("tcp://%s", net.JoinHostPort(ip, "2376")), nil -} - -func (d *Driver) GetState() (state.State, error) { - stdout, err := cmdOut("(", "Hyper-V\\Get-VM", d.MachineName, ").state") - if err != nil { - return state.None, fmt.Errorf("Failed to find the VM status") - } - - resp := parseLines(stdout) - if len(resp) < 1 { - return state.None, nil - } - - switch resp[0] { - case "Running": - return state.Running, nil - case "Off": - return state.Stopped, nil - default: - return state.None, nil - } -} - -// PreCreateCheck checks that the machine creation process can be started safely. -func (d *Driver) PreCreateCheck() error { - // Check that powershell was found - if powershell == "" { - return ErrPowerShellNotFound - } - - // Check that hyperv is installed - if err := hypervAvailable(); err != nil { - return err - } - - // Check that the user is an Administrator - isAdmin, err := isAdministrator() - if err != nil { - return err - } - if !isAdmin { - return ErrNotAdministrator - } - - // Check that there is a virtual switch already configured - if _, err := d.chooseVirtualSwitch(); err != nil { - return err - } - - // Downloading boot2docker to cache should be done here to make sure - // that a download failure will not leave a machine half created. 
- b2dutils := mcnutils.NewB2dUtils(d.StorePath) - err = b2dutils.UpdateISOCache(d.Boot2DockerURL) - return err -} - -func (d *Driver) Create() error { - b2dutils := mcnutils.NewB2dUtils(d.StorePath) - if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil { - return err - } - - log.Infof("Creating SSH key...") - if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil { - return err - } - - log.Infof("Creating VM...") - virtualSwitch, err := d.chooseVirtualSwitch() - if err != nil { - return err - } - - log.Infof("Using switch %q", virtualSwitch) - - diskImage, err := d.generateDiskImage() - if err != nil { - return err - } - - if err := cmd("Hyper-V\\New-VM", - d.MachineName, - "-Path", fmt.Sprintf("'%s'", d.ResolveStorePath(".")), - "-SwitchName", quote(virtualSwitch), - "-MemoryStartupBytes", toMb(d.MemSize)); err != nil { - return err - } - if d.DisableDynamicMemory { - if err := cmd("Hyper-V\\Set-VMMemory", - "-VMName", d.MachineName, - "-DynamicMemoryEnabled", "$false"); err != nil { - return err - } - } - - if d.CPU > 1 { - if err := cmd("Hyper-V\\Set-VMProcessor", - d.MachineName, - "-Count", fmt.Sprintf("%d", d.CPU)); err != nil { - return err - } - } - - if d.MacAddr != "" { - if err := cmd("Hyper-V\\Set-VMNetworkAdapter", - "-VMName", d.MachineName, - "-StaticMacAddress", fmt.Sprintf("\"%s\"", d.MacAddr)); err != nil { - return err - } - } - - if d.VLanID > 0 { - if err := cmd("Hyper-V\\Set-VMNetworkAdapterVlan", - "-VMName", d.MachineName, - "-Access", - "-VlanId", fmt.Sprintf("%d", d.VLanID)); err != nil { - return err - } - } - - if err := cmd("Hyper-V\\Set-VMDvdDrive", - "-VMName", d.MachineName, - "-Path", quote(d.ResolveStorePath("boot2docker.iso"))); err != nil { - return err - } - - if err := cmd("Hyper-V\\Add-VMHardDiskDrive", - "-VMName", d.MachineName, - "-Path", quote(diskImage)); err != nil { - return err - } - - log.Infof("Starting VM...") - return d.Start() -} - -func (d *Driver) chooseVirtualSwitch() (string, error) { - if d.VSwitch == "" { - // Default to the first external switche and in the process avoid DockerNAT - stdout, err := cmdOut("[Console]::OutputEncoding = [Text.Encoding]::UTF8; (Hyper-V\\Get-VMSwitch -SwitchType External).Name") - if err != nil { - return "", err - } - - switches := parseLines(stdout) - - if len(switches) < 1 { - return "", fmt.Errorf("no External vswitch found. A valid vswitch must be available for this command to run. 
Check https://docs.docker.com/machine/drivers/hyper-v/") - } - - return switches[0], nil - } - - stdout, err := cmdOut("[Console]::OutputEncoding = [Text.Encoding]::UTF8; (Hyper-V\\Get-VMSwitch).Name") - if err != nil { - return "", err - } - - switches := parseLines(stdout) - - found := false - for _, name := range switches { - if name == d.VSwitch { - found = true - break - } - } - - if !found { - return "", fmt.Errorf("vswitch %q not found", d.VSwitch) - } - - return d.VSwitch, nil -} - -// waitForIP waits until the host has a valid IP -func (d *Driver) waitForIP() (string, error) { - log.Infof("Waiting for host to start...") - - for { - ip, _ := d.GetIP() - if ip != "" { - return ip, nil - } - - time.Sleep(1 * time.Second) - } -} - -// waitStopped waits until the host is stopped -func (d *Driver) waitStopped() error { - log.Infof("Waiting for host to stop...") - - for { - s, err := d.GetState() - if err != nil { - return err - } - - if s != state.Running { - return nil - } - - time.Sleep(1 * time.Second) - } -} - -// Start starts an host -func (d *Driver) Start() error { - if err := cmd("Hyper-V\\Start-VM", d.MachineName); err != nil { - return err - } - - ip, err := d.waitForIP() - if err != nil { - return err - } - - d.IPAddress = ip - - return nil -} - -// Stop stops an host -func (d *Driver) Stop() error { - if err := cmd("Hyper-V\\Stop-VM", d.MachineName); err != nil { - return err - } - - if err := d.waitStopped(); err != nil { - return err - } - - d.IPAddress = "" - - return nil -} - -// Remove removes an host -func (d *Driver) Remove() error { - s, err := d.GetState() - if err != nil { - return err - } - - if s == state.Running { - if err := d.Kill(); err != nil { - return err - } - } - - return cmd("Hyper-V\\Remove-VM", d.MachineName, "-Force") -} - -// Restart stops and starts an host -func (d *Driver) Restart() error { - err := d.Stop() - if err != nil { - return err - } - - return d.Start() -} - -// Kill force stops an host -func (d *Driver) Kill() error { - if err := cmd("Hyper-V\\Stop-VM", d.MachineName, "-TurnOff"); err != nil { - return err - } - - if err := d.waitStopped(); err != nil { - return err - } - - d.IPAddress = "" - - return nil -} - -func (d *Driver) GetIP() (string, error) { - s, err := d.GetState() - if err != nil { - return "", err - } - if s != state.Running { - return "", drivers.ErrHostIsNotRunning - } - - stdout, err := cmdOut("((", "Hyper-V\\Get-VM", d.MachineName, ").networkadapters[0]).ipaddresses[0]") - if err != nil { - return "", err - } - - resp := parseLines(stdout) - if len(resp) < 1 { - return "", fmt.Errorf("IP not found") - } - - return resp[0], nil -} - -func (d *Driver) publicSSHKeyPath() string { - return d.GetSSHKeyPath() + ".pub" -} - -// generateDiskImage creates a small fixed vhd, put the tar in, convert to dynamic, then resize -func (d *Driver) generateDiskImage() (string, error) { - diskImage := d.ResolveStorePath("disk.vhd") - fixed := d.ResolveStorePath("fixed.vhd") - - // Resizing vhds requires administrator privileges - // incase the user is only a hyper-v admin then create the disk at the target size to avoid resizing. 
- isWindowsAdmin, err := isWindowsAdministrator() - if err != nil { - return "", err - } - fixedDiskSize := "10MB" - if !isWindowsAdmin { - fixedDiskSize = toMb(d.DiskSize) - } - - log.Infof("Creating VHD") - if err := cmd("Hyper-V\\New-VHD", "-Path", quote(fixed), "-SizeBytes", fixedDiskSize, "-Fixed"); err != nil { - return "", err - } - - tarBuf, err := mcnutils.MakeDiskImage(d.publicSSHKeyPath()) - if err != nil { - return "", err - } - - file, err := os.OpenFile(fixed, os.O_WRONLY, 0644) - if err != nil { - return "", err - } - defer file.Close() - - file.Seek(0, os.SEEK_SET) - _, err = file.Write(tarBuf.Bytes()) - if err != nil { - return "", err - } - file.Close() - - if err := cmd("Hyper-V\\Convert-VHD", "-Path", quote(fixed), "-DestinationPath", quote(diskImage), "-VHDType", "Dynamic", "-DeleteSource"); err != nil { - return "", err - } - - if isWindowsAdmin { - if err := cmd("Hyper-V\\Resize-VHD", "-Path", quote(diskImage), "-SizeBytes", toMb(d.DiskSize)); err != nil { - return "", err - } - } - - return diskImage, nil -} diff --git a/vendor/github.com/docker/machine/drivers/hyperv/powershell.go b/vendor/github.com/docker/machine/drivers/hyperv/powershell.go deleted file mode 100644 index 5175791fd516..000000000000 --- a/vendor/github.com/docker/machine/drivers/hyperv/powershell.go +++ /dev/null @@ -1,114 +0,0 @@ -package hyperv - -import ( - "bufio" - "bytes" - "errors" - "os/exec" - "strings" - - "fmt" - - "github.com/docker/machine/libmachine/log" -) - -var powershell string - -var ( - ErrPowerShellNotFound = errors.New("Powershell was not found in the path") - ErrNotAdministrator = errors.New("Hyper-v commands have to be run as an Administrator") - ErrNotInstalled = errors.New("Hyper-V PowerShell Module is not available") -) - -func init() { - powershell, _ = exec.LookPath("powershell.exe") -} - -func cmdOut(args ...string) (string, error) { - args = append([]string{"-NoProfile", "-NonInteractive"}, args...) - cmd := exec.Command(powershell, args...) - log.Debugf("[executing ==>] : %v %v", powershell, strings.Join(args, " ")) - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err := cmd.Run() - log.Debugf("[stdout =====>] : %s", stdout.String()) - log.Debugf("[stderr =====>] : %s", stderr.String()) - return stdout.String(), err -} - -func cmd(args ...string) error { - _, err := cmdOut(args...) 
- return err -} - -func parseLines(stdout string) []string { - resp := []string{} - - s := bufio.NewScanner(strings.NewReader(stdout)) - for s.Scan() { - resp = append(resp, s.Text()) - } - - return resp -} - -func hypervAvailable() error { - stdout, err := cmdOut("@(Get-Module -ListAvailable hyper-v).Name | Get-Unique") - if err != nil { - return err - } - - resp := parseLines(stdout) - if resp[0] != "Hyper-V" { - return ErrNotInstalled - } - - return nil -} - -func isAdministrator() (bool, error) { - hypervAdmin := isHypervAdministrator() - - if hypervAdmin { - return true, nil - } - - windowsAdmin, err := isWindowsAdministrator() - - if err != nil { - return false, err - } - - return windowsAdmin, nil -} - -func isHypervAdministrator() bool { - stdout, err := cmdOut(`@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole("S-1-5-32-578")`) - if err != nil { - log.Debug(err) - return false - } - - resp := parseLines(stdout) - return resp[0] == "True" -} - -func isWindowsAdministrator() (bool, error) { - stdout, err := cmdOut(`@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator")`) - if err != nil { - return false, err - } - - resp := parseLines(stdout) - return resp[0] == "True", nil -} - -func quote(text string) string { - return fmt.Sprintf("'%s'", text) -} - -func toMb(value int) string { - return fmt.Sprintf("%dMB", value) -} diff --git a/vendor/github.com/docker/machine/drivers/none/driver.go b/vendor/github.com/docker/machine/drivers/none/driver.go deleted file mode 100644 index 2e88dbef79a6..000000000000 --- a/vendor/github.com/docker/machine/drivers/none/driver.go +++ /dev/null @@ -1,113 +0,0 @@ -package none - -import ( - "fmt" - neturl "net/url" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" -) - -const driverName = "none" - -// Driver is the driver used when no driver is selected. It is used to -// connect to existing Docker hosts by specifying the URL of the host as -// an option. 
-type Driver struct { - *drivers.BaseDriver - URL string -} - -func NewDriver(hostName, storePath string) *Driver { - return &Driver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: hostName, - StorePath: storePath, - }, - } -} - -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return []mcnflag.Flag{ - mcnflag.StringFlag{ - Name: "url", - Usage: "URL of host when no driver is selected", - Value: "", - }, - } -} - -func (d *Driver) Create() error { - return nil -} - -// DriverName returns the name of the driver -func (d *Driver) DriverName() string { - return driverName -} - -func (d *Driver) GetIP() (string, error) { - return d.IPAddress, nil -} - -func (d *Driver) GetSSHHostname() (string, error) { - return "", nil -} - -func (d *Driver) GetSSHKeyPath() string { - return "" -} - -func (d *Driver) GetSSHPort() (int, error) { - return 0, nil -} - -func (d *Driver) GetSSHUsername() string { - return "" -} - -func (d *Driver) GetURL() (string, error) { - return d.URL, nil -} - -func (d *Driver) GetState() (state.State, error) { - return state.Running, nil -} - -func (d *Driver) Kill() error { - return fmt.Errorf("hosts without a driver cannot be killed") -} - -func (d *Driver) Remove() error { - return nil -} - -func (d *Driver) Restart() error { - return fmt.Errorf("hosts without a driver cannot be restarted") -} - -func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { - url := flags.String("url") - - if url == "" { - return fmt.Errorf("--url option is required when no driver is selected") - } - - d.URL = url - u, err := neturl.Parse(url) - if err != nil { - return err - } - - d.IPAddress = u.Host - return nil -} - -func (d *Driver) Start() error { - return fmt.Errorf("hosts without a driver cannot be started") -} - -func (d *Driver) Stop() error { - return fmt.Errorf("hosts without a driver cannot be stopped") -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/disk.go b/vendor/github.com/docker/machine/drivers/virtualbox/disk.go deleted file mode 100644 index 82f064f6f395..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/disk.go +++ /dev/null @@ -1,141 +0,0 @@ -package virtualbox - -import ( - "fmt" - "io" - "os" - "os/exec" - - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" -) - -type VirtualDisk struct { - UUID string - Path string -} - -type DiskCreator interface { - Create(size int, publicSSHKeyPath, diskPath string) error -} - -func NewDiskCreator() DiskCreator { - return &defaultDiskCreator{} -} - -type defaultDiskCreator struct{} - -// Make a boot2docker VM disk image. -func (c *defaultDiskCreator) Create(size int, publicSSHKeyPath, diskPath string) error { - log.Debugf("Creating %d MB hard disk image...", size) - - tarBuf, err := mcnutils.MakeDiskImage(publicSSHKeyPath) - if err != nil { - return err - } - - log.Debug("Calling inner createDiskImage") - - return createDiskImage(diskPath, size, tarBuf) -} - -// createDiskImage makes a disk image at dest with the given size in MB. If r is -// not nil, it will be read as a raw disk image to convert from. -func createDiskImage(dest string, size int, r io.Reader) error { - // Convert a raw image from stdin to the dest VMDK image. - sizeBytes := int64(size) << 20 // usually won't fit in 32-bit int (max 2GB) - // FIXME: why isn't this just using the vbm*() functions? 
- cmd := exec.Command(vboxManageCmd, "convertfromraw", "stdin", dest, - fmt.Sprintf("%d", sizeBytes), "--format", "VMDK") - - log.Debug(cmd) - - if os.Getenv("MACHINE_DEBUG") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - - stdin, err := cmd.StdinPipe() - if err != nil { - return err - } - - log.Debug("Starting command") - - if err := cmd.Start(); err != nil { - return err - } - - log.Debug("Copying to stdin") - - n, err := io.Copy(stdin, r) - if err != nil { - return err - } - - log.Debug("Filling zeroes") - - // The total number of bytes written to stdin must match sizeBytes, or - // VBoxManage.exe on Windows will fail. Fill remaining with zeros. - if left := sizeBytes - n; left > 0 { - if err := zeroFill(stdin, left); err != nil { - return err - } - } - - log.Debug("Closing STDIN") - - // cmd won't exit until the stdin is closed. - if err := stdin.Close(); err != nil { - return err - } - - log.Debug("Waiting on cmd") - - return cmd.Wait() -} - -// zeroFill writes n zero bytes into w. -func zeroFill(w io.Writer, n int64) error { - const blocksize = 32 << 10 - zeros := make([]byte, blocksize) - var k int - var err error - for n > 0 { - if n > blocksize { - k, err = w.Write(zeros) - } else { - k, err = w.Write(zeros[:n]) - } - if err != nil { - return err - } - n -= int64(k) - } - return nil -} - -func getVMDiskInfo(name string, vbox VBoxManager) (*VirtualDisk, error) { - out, err := vbox.vbmOut("showvminfo", name, "--machinereadable") - if err != nil { - return nil, err - } - - disk := &VirtualDisk{} - - err = parseKeyValues(out, reEqualQuoteLine, func(key, val string) error { - switch key { - case "SATA-1-0": - disk.Path = val - case "SATA-ImageUUID-1-0": - disk.UUID = val - } - - return nil - }) - if err != nil { - return nil, err - } - - return disk, nil -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/ip.go b/vendor/github.com/docker/machine/drivers/virtualbox/ip.go deleted file mode 100644 index be22a4fb7a9e..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/ip.go +++ /dev/null @@ -1,36 +0,0 @@ -package virtualbox - -import ( - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/mcnutils" -) - -// IPWaiter waits for an IP to be configured. -type IPWaiter interface { - Wait(d *Driver) error -} - -func NewIPWaiter() IPWaiter { - return &sshIPWaiter{} -} - -type sshIPWaiter struct{} - -func (w *sshIPWaiter) Wait(d *Driver) error { - // Wait for SSH over NAT to be available before returning to user - if err := drivers.WaitForSSH(d); err != nil { - return err - } - - // Bail if we don't get an IP from DHCP after a given number of seconds. - if err := mcnutils.WaitForSpecific(d.hostOnlyIPAvailable, 5, 4*time.Second); err != nil { - return err - } - - var err error - d.IPAddress, err = d.GetIP() - - return err -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/misc.go b/vendor/github.com/docker/machine/drivers/virtualbox/misc.go deleted file mode 100644 index 3972a95443f8..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/misc.go +++ /dev/null @@ -1,112 +0,0 @@ -package virtualbox - -import ( - "bufio" - "math/rand" - "os" - - "time" - - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/ssh" -) - -// B2DUpdater describes the interactions with b2d. 
-type B2DUpdater interface { - UpdateISOCache(storePath, isoURL string) error - CopyIsoToMachineDir(storePath, machineName, isoURL string) error -} - -func NewB2DUpdater() B2DUpdater { - return &b2dUtilsUpdater{} -} - -type b2dUtilsUpdater struct{} - -func (u *b2dUtilsUpdater) CopyIsoToMachineDir(storePath, machineName, isoURL string) error { - return mcnutils.NewB2dUtils(storePath).CopyIsoToMachineDir(isoURL, machineName) -} - -func (u *b2dUtilsUpdater) UpdateISOCache(storePath, isoURL string) error { - return mcnutils.NewB2dUtils(storePath).UpdateISOCache(isoURL) -} - -// SSHKeyGenerator describes the generation of ssh keys. -type SSHKeyGenerator interface { - Generate(path string) error -} - -func NewSSHKeyGenerator() SSHKeyGenerator { - return &defaultSSHKeyGenerator{} -} - -type defaultSSHKeyGenerator struct{} - -func (g *defaultSSHKeyGenerator) Generate(path string) error { - return ssh.GenerateSSHKey(path) -} - -// LogsReader describes the reading of VBox.log -type LogsReader interface { - Read(path string) ([]string, error) -} - -func NewLogsReader() LogsReader { - return &vBoxLogsReader{} -} - -type vBoxLogsReader struct{} - -func (c *vBoxLogsReader) Read(path string) ([]string, error) { - file, err := os.Open(path) - if err != nil { - return []string{}, err - } - - defer file.Close() - - lines := []string{} - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - - return lines, nil -} - -// RandomInter returns random int values. -type RandomInter interface { - RandomInt(n int) int -} - -func NewRandomInter() RandomInter { - src := rand.NewSource(time.Now().UnixNano()) - - return &defaultRandomInter{ - rand: rand.New(src), - } -} - -type defaultRandomInter struct { - rand *rand.Rand -} - -func (d *defaultRandomInter) RandomInt(n int) int { - return d.rand.Intn(n) -} - -// Sleeper sleeps for given duration. -type Sleeper interface { - Sleep(d time.Duration) -} - -func NewSleeper() Sleeper { - return &defaultSleeper{} -} - -type defaultSleeper struct{} - -func (s *defaultSleeper) Sleep(d time.Duration) { - time.Sleep(d) -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/network.go b/vendor/github.com/docker/machine/drivers/virtualbox/network.go deleted file mode 100644 index 052684020d75..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/network.go +++ /dev/null @@ -1,415 +0,0 @@ -package virtualbox - -import ( - "errors" - "fmt" - "net" - "regexp" - "strings" - "time" - - "runtime" - - "github.com/docker/machine/libmachine/log" -) - -const ( - buggyNetmask = "0f000000" - dhcpPrefix = "HostInterfaceNetworking-" -) - -var ( - reHostOnlyAdapterCreated = regexp.MustCompile(`Interface '(.+)' was successfully created`) - errNewHostOnlyAdapterNotVisible = errors.New("The host-only adapter we just created is not visible. This is a well known VirtualBox bug. You might want to uninstall it and reinstall at least version 5.0.12 that is is supposed to fix this issue") -) - -// Host-only network. -type hostOnlyNetwork struct { - Name string - GUID string - DHCP bool - IPv4 net.IPNet - HwAddr net.HardwareAddr - Medium string - Status string - NetworkName string // referenced in DHCP.NetworkName -} - -// HostInterfaces returns host network interface info. 
By default delegates to net.Interfaces() -type HostInterfaces interface { - Interfaces() ([]net.Interface, error) - Addrs(iface *net.Interface) ([]net.Addr, error) -} - -func NewHostInterfaces() HostInterfaces { - return &defaultHostInterfaces{} -} - -type defaultHostInterfaces struct { -} - -func (ni *defaultHostInterfaces) Interfaces() ([]net.Interface, error) { - return net.Interfaces() -} - -func (ni *defaultHostInterfaces) Addrs(iface *net.Interface) ([]net.Addr, error) { - return iface.Addrs() -} - -// Save changes the configuration of the host-only network. -func (n *hostOnlyNetwork) Save(vbox VBoxManager) error { - if err := n.SaveIPv4(vbox); err != nil { - return err - } - - if n.DHCP { - vbox.vbm("hostonlyif", "ipconfig", n.Name, "--dhcp") // not implemented as of VirtualBox 4.3 - } - - return nil -} - -// SaveIPv4 changes the ipv4 configuration of the host-only network. -func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error { - if n.IPv4.IP != nil && n.IPv4.Mask != nil { - if runtime.GOOS == "windows" { - log.Warn("Windows might ask for the permission to configure a network adapter. Sometimes, such confirmation window is minimized in the taskbar.") - } - - if err := vbox.vbm("hostonlyif", "ipconfig", n.Name, "--ip", n.IPv4.IP.String(), "--netmask", net.IP(n.IPv4.Mask).String()); err != nil { - return err - } - } - - return nil -} - -// createHostonlyAdapter creates a new host-only network. -func createHostonlyAdapter(vbox VBoxManager) (*hostOnlyNetwork, error) { - if runtime.GOOS == "windows" { - log.Warn("Windows might ask for the permission to create a network adapter. Sometimes, such confirmation window is minimized in the taskbar.") - } - - out, err := vbox.vbmOut("hostonlyif", "create") - if err != nil { - return nil, err - } - - res := reHostOnlyAdapterCreated.FindStringSubmatch(string(out)) - if res == nil { - return nil, errors.New("Failed to create host-only adapter") - } - - return &hostOnlyNetwork{Name: res[1]}, nil -} - -// listHostOnlyAdapters gets all host-only adapters in a map keyed by NetworkName. -func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) { - out, err := vbox.vbmOut("list", "hostonlyifs") - if err != nil { - return nil, err - } - - byName := map[string]*hostOnlyNetwork{} - byIP := map[string]*hostOnlyNetwork{} - n := &hostOnlyNetwork{} - - err = parseKeyValues(out, reColonLine, func(key, val string) error { - switch key { - case "Name": - n.Name = val - case "GUID": - n.GUID = val - case "DHCP": - n.DHCP = (val != "Disabled") - case "IPAddress": - n.IPv4.IP = net.ParseIP(val) - case "NetworkMask": - n.IPv4.Mask = parseIPv4Mask(val) - case "HardwareAddress": - mac, err := net.ParseMAC(val) - if err != nil { - return err - } - n.HwAddr = mac - case "MediumType": - n.Medium = val - case "Status": - n.Status = val - case "VBoxNetworkName": - n.NetworkName = val - - if _, present := byName[n.NetworkName]; present { - return fmt.Errorf("VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one", n.NetworkName) - } - byName[n.NetworkName] = n - - if len(n.IPv4.IP) != 0 { - if _, present := byIP[n.IPv4.IP.String()]; present { - return fmt.Errorf("VirtualBox is configured with multiple host-only adapters with the same IP %q. 
Please remove one", n.IPv4.IP) - } - byIP[n.IPv4.IP.String()] = n - } - - n = &hostOnlyNetwork{} - } - - return nil - }) - if err != nil { - return nil, err - } - - return byName, nil -} - -func getHostOnlyAdapter(nets map[string]*hostOnlyNetwork, hostIP net.IP, netmask net.IPMask) *hostOnlyNetwork { - for _, n := range nets { - // Second part of this conditional handles a race where - // VirtualBox returns us the incorrect netmask value for the - // newly created adapter. - if hostIP.Equal(n.IPv4.IP) && - (netmask.String() == n.IPv4.Mask.String() || n.IPv4.Mask.String() == buggyNetmask) { - log.Debugf("Found: %s", n.Name) - return n - } - } - - log.Debug("Not found") - return nil -} - -func getOrCreateHostOnlyNetwork(hostIP net.IP, netmask net.IPMask, nets map[string]*hostOnlyNetwork, vbox VBoxManager) (*hostOnlyNetwork, error) { - // Search for an existing host-only adapter. - hostOnlyAdapter := getHostOnlyAdapter(nets, hostIP, netmask) - if hostOnlyAdapter != nil { - return hostOnlyAdapter, nil - } - - // No existing host-only adapter found. Create a new one. - _, err := createHostonlyAdapter(vbox) - if err != nil { - // Sometimes the host-only adapter fails to create. See https://www.virtualbox.org/ticket/14040 - // BUT, it is created in fact! So let's wait until it appears last in the list - log.Warnf("Creating a new host-only adapter produced an error: %s", err) - log.Warn("This is a known VirtualBox bug. Let's try to recover anyway...") - } - - // It can take some time for an adapter to appear. Let's poll. - hostOnlyAdapter, err = waitForNewHostOnlyNetwork(nets, vbox) - if err != nil { - // Sometimes, Vbox says it created it but then it cannot be found... - return nil, errNewHostOnlyAdapterNotVisible - } - - log.Warnf("Found a new host-only adapter: %q", hostOnlyAdapter.Name) - - hostOnlyAdapter.IPv4.IP = hostIP - hostOnlyAdapter.IPv4.Mask = netmask - if err := hostOnlyAdapter.Save(vbox); err != nil { - return nil, err - } - - return hostOnlyAdapter, nil -} - -func waitForNewHostOnlyNetwork(oldNets map[string]*hostOnlyNetwork, vbox VBoxManager) (*hostOnlyNetwork, error) { - for i := 0; i < 10; i++ { - time.Sleep(1 * time.Second) - - newNets, err := listHostOnlyAdapters(vbox) - if err != nil { - return nil, err - } - - for name, latestNet := range newNets { - if _, present := oldNets[name]; !present { - return latestNet, nil - } - } - } - - return nil, errors.New("Failed to find a new host-only adapter") -} - -// DHCP server info. -type dhcpServer struct { - NetworkName string - IPv4 net.IPNet - LowerIP net.IP - UpperIP net.IP - Enabled bool -} - -// removeOrphanDHCPServers removed the DHCP servers linked to no host-only adapter -func removeOrphanDHCPServers(vbox VBoxManager) error { - dhcps, err := listDHCPServers(vbox) - if err != nil { - return err - } - - if len(dhcps) == 0 { - return nil - } - - log.Debug("Removing orphan DHCP servers...") - - nets, err := listHostOnlyAdapters(vbox) - if err != nil { - return err - } - - for name := range dhcps { - if strings.HasPrefix(name, dhcpPrefix) { - if _, present := nets[name]; !present { - if err := vbox.vbm("dhcpserver", "remove", "--netname", name); err != nil { - log.Warnf("Unable to remove orphan dhcp server %q: %s", name, err) - } - } - } - } - - return nil -} - -// addHostOnlyDHCPServer adds a DHCP server to a host-only network. 
-func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error { - name := dhcpPrefix + ifname - - dhcps, err := listDHCPServers(vbox) - if err != nil { - return err - } - - // On some platforms (OSX), creating a host-only adapter adds a default dhcpserver, - // while on others (Windows?) it does not. - command := "add" - if dhcp, ok := dhcps[name]; ok { - command = "modify" - if (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && (dhcp.Enabled == d.Enabled) { - // dhcp is up to date - return nil - } - } - - args := []string{"dhcpserver", command, - "--netname", name, - "--ip", d.IPv4.IP.String(), - "--netmask", net.IP(d.IPv4.Mask).String(), - "--lowerip", d.LowerIP.String(), - "--upperip", d.UpperIP.String(), - } - if d.Enabled { - args = append(args, "--enable") - } else { - args = append(args, "--disable") - } - - if runtime.GOOS == "windows" { - log.Warn("Windows might ask for the permission to configure a dhcp server. Sometimes, such confirmation window is minimized in the taskbar.") - } - - return vbox.vbm(args...) -} - -// listDHCPServers lists all DHCP server settings in a map keyed by DHCP.NetworkName. -func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) { - out, err := vbox.vbmOut("list", "dhcpservers") - if err != nil { - return nil, err - } - - m := map[string]*dhcpServer{} - dhcp := &dhcpServer{} - - err = parseKeyValues(out, reColonLine, func(key, val string) error { - switch key { - case "NetworkName": - dhcp = &dhcpServer{} - m[val] = dhcp - dhcp.NetworkName = val - case "IP": - dhcp.IPv4.IP = net.ParseIP(val) - case "upperIPAddress": - dhcp.UpperIP = net.ParseIP(val) - case "lowerIPAddress": - dhcp.LowerIP = net.ParseIP(val) - case "NetworkMask": - dhcp.IPv4.Mask = parseIPv4Mask(val) - case "Enabled": - dhcp.Enabled = (val == "Yes") - } - - return nil - }) - if err != nil { - return nil, err - } - - return m, nil -} - -// listHostInterfaces returns a map of net.IPNet addresses of host interfaces that are "UP" and not loopback adapters -// and not virtualbox host-only networks (given by excludeNets), keyed by CIDR string. -func listHostInterfaces(hif HostInterfaces, excludeNets map[string]*hostOnlyNetwork) (map[string]*net.IPNet, error) { - ifaces, err := hif.Interfaces() - if err != nil { - return nil, err - } - m := map[string]*net.IPNet{} - - for _, iface := range ifaces { - addrs, err := hif.Addrs(&iface) - if err != nil { - return nil, err - } - - // Check if an address of the interface is in the list of excluded addresses - ifaceExcluded := false - for _, a := range addrs { - switch ipnet := a.(type) { - case *net.IPNet: - _, excluded := excludeNets[ipnet.String()] - if excluded { - ifaceExcluded = true - break - } - } - } - - // If excluded, or not up, or a loopback interface, skip the interface - if ifaceExcluded || iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 { - continue - } - - // This is a host interface, so add all its addresses to the map - for _, a := range addrs { - switch ipnet := a.(type) { - case *net.IPNet: - m[ipnet.String()] = ipnet - } - } - } - return m, nil -} - -// checkIPNetCollision returns true if any host interfaces conflict with the host-only network mask passed as a parameter. -// This works with IPv4 or IPv6 ip addresses. 
-func checkIPNetCollision(hostonly *net.IPNet, hostIfaces map[string]*net.IPNet) (bool, error) { - for _, ifaceNet := range hostIfaces { - if hostonly.IP.Equal(ifaceNet.IP.Mask(ifaceNet.Mask)) { - return true, nil - } - } - return false, nil -} - -// parseIPv4Mask parses IPv4 netmask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func parseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - return nil - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/vbm.go b/vendor/github.com/docker/machine/drivers/virtualbox/vbm.go deleted file mode 100644 index ea6eb8d57d5c..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/vbm.go +++ /dev/null @@ -1,165 +0,0 @@ -package virtualbox - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "os/exec" - "regexp" - "strings" - - "strconv" - - "time" - - "github.com/docker/machine/libmachine/log" -) - -const ( - retryCountOnObjectNotReadyError = 5 - objectNotReady = "error: The object is not ready" - retryDelay = 100 * time.Millisecond -) - -var ( - reColonLine = regexp.MustCompile(`(.+):\s+(.*)`) - reEqualLine = regexp.MustCompile(`(.+)=(.*)`) - reEqualQuoteLine = regexp.MustCompile(`"(.+)"="(.*)"`) - reMachineNotFound = regexp.MustCompile(`Could not find a registered machine named '(.+)'`) - - ErrMachineNotExist = errors.New("machine does not exist") - ErrVBMNotFound = errors.New("VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path") - - vboxManageCmd = detectVBoxManageCmd() -) - -// VBoxManager defines the interface to communicate to VirtualBox. -type VBoxManager interface { - vbm(args ...string) error - - vbmOut(args ...string) (string, error) - - vbmOutErr(args ...string) (string, string, error) -} - -// VBoxCmdManager communicates with VirtualBox through the commandline using `VBoxManage`. -type VBoxCmdManager struct { - runCmd func(cmd *exec.Cmd) error -} - -// NewVBoxManager creates a VBoxManager instance. -func NewVBoxManager() *VBoxCmdManager { - return &VBoxCmdManager{ - runCmd: func(cmd *exec.Cmd) error { return cmd.Run() }, - } -} - -func (v *VBoxCmdManager) vbm(args ...string) error { - _, _, err := v.vbmOutErr(args...) - return err -} - -func (v *VBoxCmdManager) vbmOut(args ...string) (string, error) { - stdout, _, err := v.vbmOutErr(args...) - return stdout, err -} - -func (v *VBoxCmdManager) vbmOutErr(args ...string) (string, string, error) { - return v.vbmOutErrRetry(retryCountOnObjectNotReadyError, args...) -} - -func (v *VBoxCmdManager) vbmOutErrRetry(retry int, args ...string) (string, string, error) { - cmd := exec.Command(vboxManageCmd, args...) - log.Debugf("COMMAND: %v %v", vboxManageCmd, strings.Join(args, " ")) - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err := v.runCmd(cmd) - stderrStr := stderr.String() - if len(args) > 0 { - log.Debugf("STDOUT:\n{\n%v}", stdout.String()) - log.Debugf("STDERR:\n{\n%v}", stderrStr) - } - - if err != nil { - if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - err = ErrVBMNotFound - } - } - - // Sometimes, we just need to retry... - if retry > 1 { - if strings.Contains(stderrStr, objectNotReady) { - time.Sleep(retryDelay) - return v.vbmOutErrRetry(retry-1, args...) 
- } - } - - if err == nil || strings.HasPrefix(err.Error(), "exit status ") { - // VBoxManage will sometimes not set the return code, but has a fatal error - // such as VBoxManage.exe: error: VT-x is not available. (VERR_VMX_NO_VMX) - if strings.Contains(stderrStr, "error:") { - err = fmt.Errorf("%v %v failed:\n%v", vboxManageCmd, strings.Join(args, " "), stderrStr) - } - } - - return stdout.String(), stderrStr, err -} - -func checkVBoxManageVersion(version string) error { - major, minor, err := parseVersion(version) - if (err != nil) || (major < 4) || (major == 4 && minor <= 2) { - return fmt.Errorf("We support Virtualbox starting with version 5. Your VirtualBox install is %q. Please upgrade at https://www.virtualbox.org", version) - } - - if major < 5 { - log.Warnf("You are using version %s of VirtualBox. If you encounter issues, you might want to upgrade to version 5 at https://www.virtualbox.org", version) - } - - return nil -} - -func parseVersion(version string) (int, int, error) { - parts := strings.Split(version, ".") - if len(parts) < 2 { - return 0, 0, fmt.Errorf("Invalid version: %q", version) - } - - major, err := strconv.Atoi(parts[0]) - if err != nil { - return 0, 0, fmt.Errorf("Invalid version: %q", version) - } - - minor, err := strconv.Atoi(parts[1]) - if err != nil { - return 0, 0, fmt.Errorf("Invalid version: %q", version) - } - - return major, minor, err -} - -func parseKeyValues(stdOut string, regexp *regexp.Regexp, callback func(key, val string) error) error { - r := strings.NewReader(stdOut) - s := bufio.NewScanner(r) - - for s.Scan() { - line := s.Text() - if line == "" { - continue - } - - res := regexp.FindStringSubmatch(line) - if res == nil { - continue - } - - key, val := res[1], res[2] - if err := callback(key, val); err != nil { - return err - } - } - - return s.Err() -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox.go b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox.go deleted file mode 100644 index 3d79e4e9789d..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox.go +++ /dev/null @@ -1,1021 +0,0 @@ -package virtualbox - -import ( - "errors" - "fmt" - "net" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/state" -) - -const ( - defaultCPU = 1 - defaultMemory = 1024 - defaultBoot2DockerURL = "" - defaultBoot2DockerImportVM = "" - defaultHostOnlyCIDR = "192.168.99.1/24" - defaultHostOnlyNictype = "82540EM" - defaultHostOnlyPromiscMode = "deny" - defaultUIType = "headless" - defaultHostOnlyNoDHCP = false - defaultDiskSize = 20000 - defaultDNSProxy = true - defaultDNSResolver = false -) - -var ( - ErrUnableToGenerateRandomIP = errors.New("unable to generate random IP") - ErrMustEnableVTX = errors.New("This computer doesn't have VT-X/AMD-v enabled. Enabling it in the BIOS is mandatory") - ErrNotCompatibleWithHyperV = errors.New("This computer is running Hyper-V. VirtualBox won't boot a 64bits VM when Hyper-V is activated. Either use Hyper-V as a driver, or disable the Hyper-V hypervisor. 
(To skip this check, use --virtualbox-no-vtx-check)") - ErrNetworkAddrCidr = errors.New("host-only cidr must be specified with a host address, not a network address") - ErrNetworkAddrCollision = errors.New("host-only cidr conflicts with the network address of a host interface") -) - -type Driver struct { - *drivers.BaseDriver - VBoxManager - HostInterfaces - b2dUpdater B2DUpdater - sshKeyGenerator SSHKeyGenerator - diskCreator DiskCreator - logsReader LogsReader - ipWaiter IPWaiter - randomInter RandomInter - sleeper Sleeper - CPU int - Memory int - DiskSize int - NatNicType string - Boot2DockerURL string - Boot2DockerImportVM string - HostDNSResolver bool - HostOnlyCIDR string - HostOnlyNicType string - HostOnlyPromiscMode string - UIType string - HostOnlyNoDHCP bool - NoShare bool - DNSProxy bool - NoVTXCheck bool - ShareFolder string -} - -// NewDriver creates a new VirtualBox driver with default settings. -func NewDriver(hostName, storePath string) *Driver { - return &Driver{ - VBoxManager: NewVBoxManager(), - b2dUpdater: NewB2DUpdater(), - sshKeyGenerator: NewSSHKeyGenerator(), - diskCreator: NewDiskCreator(), - logsReader: NewLogsReader(), - ipWaiter: NewIPWaiter(), - randomInter: NewRandomInter(), - sleeper: NewSleeper(), - HostInterfaces: NewHostInterfaces(), - Memory: defaultMemory, - CPU: defaultCPU, - DiskSize: defaultDiskSize, - NatNicType: defaultHostOnlyNictype, - HostOnlyCIDR: defaultHostOnlyCIDR, - HostOnlyNicType: defaultHostOnlyNictype, - HostOnlyPromiscMode: defaultHostOnlyPromiscMode, - UIType: defaultUIType, - HostOnlyNoDHCP: defaultHostOnlyNoDHCP, - DNSProxy: defaultDNSProxy, - HostDNSResolver: defaultDNSResolver, - BaseDriver: &drivers.BaseDriver{ - MachineName: hostName, - StorePath: storePath, - }, - } -} - -// GetCreateFlags registers the flags this driver adds to -// "docker hosts create" -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return []mcnflag.Flag{ - mcnflag.IntFlag{ - Name: "virtualbox-memory", - Usage: "Size of memory for host in MB", - Value: defaultMemory, - EnvVar: "VIRTUALBOX_MEMORY_SIZE", - }, - mcnflag.IntFlag{ - Name: "virtualbox-cpu-count", - Usage: "number of CPUs for the machine (-1 to use the number of CPUs available)", - Value: defaultCPU, - EnvVar: "VIRTUALBOX_CPU_COUNT", - }, - mcnflag.IntFlag{ - Name: "virtualbox-disk-size", - Usage: "Size of disk for host in MB", - Value: defaultDiskSize, - EnvVar: "VIRTUALBOX_DISK_SIZE", - }, - mcnflag.StringFlag{ - Name: "virtualbox-boot2docker-url", - Usage: "The URL of the boot2docker image. 
Defaults to the latest available version", - Value: defaultBoot2DockerURL, - EnvVar: "VIRTUALBOX_BOOT2DOCKER_URL", - }, - mcnflag.StringFlag{ - Name: "virtualbox-import-boot2docker-vm", - Usage: "The name of a Boot2Docker VM to import", - Value: defaultBoot2DockerImportVM, - EnvVar: "VIRTUALBOX_BOOT2DOCKER_IMPORT_VM", - }, - mcnflag.BoolFlag{ - Name: "virtualbox-host-dns-resolver", - Usage: "Use the host DNS resolver", - EnvVar: "VIRTUALBOX_HOST_DNS_RESOLVER", - }, - mcnflag.StringFlag{ - Name: "virtualbox-nat-nictype", - Usage: "Specify the Network Adapter Type", - Value: defaultHostOnlyNictype, - EnvVar: "VIRTUALBOX_NAT_NICTYPE", - }, - mcnflag.StringFlag{ - Name: "virtualbox-hostonly-cidr", - Usage: "Specify the Host Only CIDR", - Value: defaultHostOnlyCIDR, - EnvVar: "VIRTUALBOX_HOSTONLY_CIDR", - }, - mcnflag.StringFlag{ - Name: "virtualbox-hostonly-nictype", - Usage: "Specify the Host Only Network Adapter Type", - Value: defaultHostOnlyNictype, - EnvVar: "VIRTUALBOX_HOSTONLY_NIC_TYPE", - }, - mcnflag.StringFlag{ - Name: "virtualbox-hostonly-nicpromisc", - Usage: "Specify the Host Only Network Adapter Promiscuous Mode", - Value: defaultHostOnlyPromiscMode, - EnvVar: "VIRTUALBOX_HOSTONLY_NIC_PROMISC", - }, - mcnflag.StringFlag{ - Name: "virtualbox-ui-type", - Usage: "Specify the UI Type: (gui|sdl|headless|separate)", - Value: defaultUIType, - EnvVar: "VIRTUALBOX_UI_TYPE", - }, - mcnflag.BoolFlag{ - Name: "virtualbox-hostonly-no-dhcp", - Usage: "Disable the Host Only DHCP Server", - EnvVar: "VIRTUALBOX_HOSTONLY_NO_DHCP", - }, - mcnflag.BoolFlag{ - Name: "virtualbox-no-share", - Usage: "Disable the mount of your home directory", - EnvVar: "VIRTUALBOX_NO_SHARE", - }, - mcnflag.BoolFlag{ - Name: "virtualbox-no-dns-proxy", - Usage: "Disable proxying all DNS requests to the host", - EnvVar: "VIRTUALBOX_NO_DNS_PROXY", - }, - mcnflag.BoolFlag{ - Name: "virtualbox-no-vtx-check", - Usage: "Disable checking for the availability of hardware virtualization before the vm is started", - EnvVar: "VIRTUALBOX_NO_VTX_CHECK", - }, - mcnflag.StringFlag{ - EnvVar: "VIRTUALBOX_SHARE_FOLDER", - Name: "virtualbox-share-folder", - Usage: "Mount the specified directory instead of the default home location. 
Format: dir:name", - }, - } -} - -func (d *Driver) GetSSHHostname() (string, error) { - return "127.0.0.1", nil -} - -func (d *Driver) GetSSHUsername() string { - if d.SSHUser == "" { - d.SSHUser = "docker" - } - - return d.SSHUser -} - -// DriverName returns the name of the driver -func (d *Driver) DriverName() string { - return "virtualbox" -} - -func (d *Driver) GetURL() (string, error) { - ip, err := d.GetIP() - if err != nil { - return "", err - } - if ip == "" { - return "", nil - } - return fmt.Sprintf("tcp://%s:2376", ip), nil -} - -func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { - d.CPU = flags.Int("virtualbox-cpu-count") - d.Memory = flags.Int("virtualbox-memory") - d.DiskSize = flags.Int("virtualbox-disk-size") - d.Boot2DockerURL = flags.String("virtualbox-boot2docker-url") - d.SetSwarmConfigFromFlags(flags) - d.SSHUser = "docker" - d.Boot2DockerImportVM = flags.String("virtualbox-import-boot2docker-vm") - d.HostDNSResolver = flags.Bool("virtualbox-host-dns-resolver") - d.NatNicType = flags.String("virtualbox-nat-nictype") - d.HostOnlyCIDR = flags.String("virtualbox-hostonly-cidr") - d.HostOnlyNicType = flags.String("virtualbox-hostonly-nictype") - d.HostOnlyPromiscMode = flags.String("virtualbox-hostonly-nicpromisc") - d.UIType = flags.String("virtualbox-ui-type") - d.HostOnlyNoDHCP = flags.Bool("virtualbox-hostonly-no-dhcp") - d.NoShare = flags.Bool("virtualbox-no-share") - d.DNSProxy = !flags.Bool("virtualbox-no-dns-proxy") - d.NoVTXCheck = flags.Bool("virtualbox-no-vtx-check") - d.ShareFolder = flags.String("virtualbox-share-folder") - - return nil -} - -// PreCreateCheck checks that VBoxManage exists and works -func (d *Driver) PreCreateCheck() error { - // Check that VBoxManage exists and works - version, err := d.vbmOut("--version") - if err != nil { - return err - } - - // Check that VBoxManage is of a supported version - if err = checkVBoxManageVersion(strings.TrimSpace(version)); err != nil { - return err - } - - if !d.NoVTXCheck { - if isHyperVInstalled() { - return ErrNotCompatibleWithHyperV - } - - if d.IsVTXDisabled() { - return ErrMustEnableVTX - } - } - - // Downloading boot2docker to cache should be done here to make sure - // that a download failure will not leave a machine half created. 
- if err := d.b2dUpdater.UpdateISOCache(d.StorePath, d.Boot2DockerURL); err != nil { - return err - } - - // Check that Host-only interfaces are ok - if _, err = listHostOnlyAdapters(d.VBoxManager); err != nil { - return err - } - - return nil -} - -func (d *Driver) Create() error { - if err := d.CreateVM(); err != nil { - return err - } - - log.Info("Starting the VM...") - return d.Start() -} - -func (d *Driver) CreateVM() error { - if err := d.b2dUpdater.CopyIsoToMachineDir(d.StorePath, d.MachineName, d.Boot2DockerURL); err != nil { - return err - } - - log.Info("Creating VirtualBox VM...") - - // import b2d VM if requested - if d.Boot2DockerImportVM != "" { - name := d.Boot2DockerImportVM - - // make sure vm is stopped - _ = d.vbm("controlvm", name, "poweroff") - - diskInfo, err := getVMDiskInfo(name, d.VBoxManager) - if err != nil { - return err - } - - if _, err := os.Stat(diskInfo.Path); err != nil { - return err - } - - if err := d.vbm("clonehd", diskInfo.Path, d.diskPath()); err != nil { - return err - } - - log.Debugf("Importing VM settings...") - vmInfo, err := getVMInfo(name, d.VBoxManager) - if err != nil { - return err - } - - d.CPU = vmInfo.CPUs - d.Memory = vmInfo.Memory - - log.Debugf("Importing SSH key...") - keyPath := filepath.Join(mcnutils.GetHomeDir(), ".ssh", "id_boot2docker") - if err := mcnutils.CopyFile(keyPath, d.GetSSHKeyPath()); err != nil { - return err - } - } else { - log.Infof("Creating SSH key...") - if err := d.sshKeyGenerator.Generate(d.GetSSHKeyPath()); err != nil { - return err - } - - log.Debugf("Creating disk image...") - if err := d.diskCreator.Create(d.DiskSize, d.publicSSHKeyPath(), d.diskPath()); err != nil { - return err - } - } - - if err := d.vbm("createvm", - "--basefolder", d.ResolveStorePath("."), - "--name", d.MachineName, - "--register"); err != nil { - return err - } - - log.Debugf("VM CPUS: %d", d.CPU) - log.Debugf("VM Memory: %d", d.Memory) - - cpus := d.CPU - if cpus < 1 { - cpus = int(runtime.NumCPU()) - } - if cpus > 32 { - cpus = 32 - } - - hostDNSResolver := "off" - if d.HostDNSResolver { - hostDNSResolver = "on" - } - - dnsProxy := "off" - if d.DNSProxy { - dnsProxy = "on" - } - - var modifyFlags = []string{ - "modifyvm", d.MachineName, - "--firmware", "bios", - "--bioslogofadein", "off", - "--bioslogofadeout", "off", - "--bioslogodisplaytime", "0", - "--biosbootmenu", "disabled", - "--ostype", "Linux26_64", - "--cpus", fmt.Sprintf("%d", cpus), - "--memory", fmt.Sprintf("%d", d.Memory), - "--acpi", "on", - "--ioapic", "on", - "--rtcuseutc", "on", - "--natdnshostresolver1", hostDNSResolver, - "--natdnsproxy1", dnsProxy, - "--cpuhotplug", "off", - "--pae", "on", - "--hpet", "on", - "--hwvirtex", "on", - "--nestedpaging", "on", - "--largepages", "on", - "--vtxvpid", "on", - "--accelerate3d", "off", - "--boot1", "dvd"} - - if runtime.GOOS == "windows" && runtime.GOARCH == "386" { - modifyFlags = append(modifyFlags, "--longmode", "on") - } - - if err := d.vbm(modifyFlags...); err != nil { - return err - } - - if err := d.vbm("modifyvm", d.MachineName, - "--nic1", "nat", - "--nictype1", d.NatNicType, - "--cableconnected1", "on"); err != nil { - return err - } - - if err := d.vbm("storagectl", d.MachineName, - "--name", "SATA", - "--add", "sata", - "--hostiocache", "on"); err != nil { - return err - } - - if err := d.vbm("storageattach", d.MachineName, - "--storagectl", "SATA", - "--port", "0", - "--device", "0", - "--type", "dvddrive", - "--medium", d.ResolveStorePath("boot2docker.iso")); err != nil { - return err - } - - if err := 
d.vbm("storageattach", d.MachineName, - "--storagectl", "SATA", - "--port", "1", - "--device", "0", - "--type", "hdd", - "--medium", d.diskPath()); err != nil { - return err - } - - // let VBoxService do nice magic automounting (when it's used) - if err := d.vbm("guestproperty", "set", d.MachineName, "/VirtualBox/GuestAdd/SharedFolders/MountPrefix", "/"); err != nil { - return err - } - if err := d.vbm("guestproperty", "set", d.MachineName, "/VirtualBox/GuestAdd/SharedFolders/MountDir", "/"); err != nil { - return err - } - - shareName, shareDir := getShareDriveAndName() - - if d.ShareFolder != "" { - shareDir, shareName = parseShareFolder(d.ShareFolder) - } - - if shareDir != "" && !d.NoShare { - log.Debugf("setting up shareDir '%s' -> '%s'", shareDir, shareName) - if _, err := os.Stat(shareDir); err != nil && !os.IsNotExist(err) { - return err - } else if !os.IsNotExist(err) { - if shareName == "" { - // parts of the VBox internal code are buggy with share names that start with "/" - shareName = strings.TrimLeft(shareDir, "/") - // TODO do some basic Windows -> MSYS path conversion - // ie, s!^([a-z]+):[/\\]+!\1/!; s!\\!/!g - } - - // woo, shareDir exists! let's carry on! - if err := d.vbm("sharedfolder", "add", d.MachineName, "--name", shareName, "--hostpath", shareDir, "--automount"); err != nil { - return err - } - - // enable symlinks - if err := d.vbm("setextradata", d.MachineName, "VBoxInternal2/SharedFoldersEnableSymlinksCreate/"+shareName, "1"); err != nil { - return err - } - } - } - - return nil -} - -func parseShareFolder(shareFolder string) (string, string) { - split := strings.Split(shareFolder, ":") - shareDir := strings.Join(split[:len(split)-1], ":") - shareName := split[len(split)-1] - return shareDir, shareName -} - -func (d *Driver) hostOnlyIPAvailable() bool { - ip, err := d.GetIP() - if err != nil { - log.Debugf("ERROR getting IP: %s", err) - return false - } - if ip == "" { - log.Debug("Strangely, there was no error attempting to get the IP, but it was still empty.") - return false - } - - log.Debugf("IP is %s", ip) - return true -} - -func (d *Driver) Start() error { - s, err := d.GetState() - if err != nil { - return err - } - - var hostOnlyAdapter *hostOnlyNetwork - if s == state.Stopped { - log.Infof("Check network to re-create if needed...") - - if hostOnlyAdapter, err = d.setupHostOnlyNetwork(d.MachineName); err != nil { - return fmt.Errorf("Error setting up host only network on machine start: %s", err) - } - } - - switch s { - case state.Stopped, state.Saved: - d.SSHPort, err = setPortForwarding(d, 1, "ssh", "tcp", 22, d.SSHPort) - if err != nil { - return err - } - - if err := d.vbm("startvm", d.MachineName, "--type", d.UIType); err != nil { - if lines, readErr := d.readVBoxLog(); readErr == nil && len(lines) > 0 { - return fmt.Errorf("Unable to start the VM: %s\nDetails: %s", err, lines[len(lines)-1]) - } - return fmt.Errorf("Unable to start the VM: %s", err) - } - case state.Paused: - if err := d.vbm("controlvm", d.MachineName, "resume", "--type", d.UIType); err != nil { - return err - } - log.Infof("Resuming VM ...") - default: - log.Infof("VM not in restartable state") - } - - if !d.NoVTXCheck { - // Verify that VT-X is not disabled in the started VM - vtxIsDisabled, err := d.IsVTXDisabledInTheVM() - if err != nil { - return fmt.Errorf("Checking if hardware virtualization is enabled failed: %s", err) - } - - if vtxIsDisabled { - return ErrMustEnableVTX - } - } - - log.Infof("Waiting for an IP...") - if err := d.ipWaiter.Wait(d); err != nil { - return err 
- } - - if hostOnlyAdapter == nil { - return nil - } - - // Check that the host-only adapter we just created can still be found - // Sometimes it is corrupted after the VM is started. - nets, err := listHostOnlyAdapters(d.VBoxManager) - if err != nil { - return err - } - - ip, network, err := parseAndValidateCIDR(d.HostOnlyCIDR) - if err != nil { - return err - } - - err = validateNoIPCollisions(d.HostInterfaces, network, nets) - if err != nil { - return err - } - - hostOnlyNet := getHostOnlyAdapter(nets, ip, network.Mask) - if hostOnlyNet != nil { - // OK, we found a valid host-only adapter - return nil - } - - // This happens a lot on windows. The adapter has an invalid IP and the VM has the same IP - log.Warn("The host-only adapter is corrupted. Let's stop the VM, fix the host-only adapter and restart the VM") - if err := d.Stop(); err != nil { - return err - } - - // We have to be sure the host-only adapter is not used by the VM - d.sleeper.Sleep(5 * time.Second) - - log.Debugf("Fixing %+v...", hostOnlyAdapter) - if err := hostOnlyAdapter.SaveIPv4(d.VBoxManager); err != nil { - return err - } - - // We have to be sure the adapter is updated before starting the VM - d.sleeper.Sleep(5 * time.Second) - - if err := d.vbm("startvm", d.MachineName, "--type", d.UIType); err != nil { - return fmt.Errorf("Unable to start the VM: %s", err) - } - - log.Infof("Waiting for an IP...") - return d.ipWaiter.Wait(d) -} - -func (d *Driver) Stop() error { - currentState, err := d.GetState() - if err != nil { - return err - } - - if currentState == state.Paused { - if err := d.vbm("controlvm", d.MachineName, "resume"); err != nil { // , "--type", d.UIType - return err - } - log.Infof("Resuming VM ...") - } - - if err := d.vbm("controlvm", d.MachineName, "acpipowerbutton"); err != nil { - return err - } - for { - s, err := d.GetState() - if err != nil { - return err - } - if s == state.Running { - d.sleeper.Sleep(1 * time.Second) - } else { - break - } - } - - d.IPAddress = "" - - return nil -} - -// Restart restarts a machine which is known to be running. 
-func (d *Driver) Restart() error { - if err := d.Stop(); err != nil { - return fmt.Errorf("Problem stopping the VM: %s", err) - } - - if err := d.Start(); err != nil { - return fmt.Errorf("Problem starting the VM: %s", err) - } - - d.IPAddress = "" - - return d.ipWaiter.Wait(d) -} - -func (d *Driver) Kill() error { - return d.vbm("controlvm", d.MachineName, "poweroff") -} - -func (d *Driver) Remove() error { - s, err := d.GetState() - if err == ErrMachineNotExist { - return nil - } - if err != nil { - return err - } - - if s != state.Stopped && s != state.Saved { - if err := d.Kill(); err != nil { - return err - } - } - - return d.vbm("unregistervm", "--delete", d.MachineName) -} - -func (d *Driver) GetState() (state.State, error) { - stdout, stderr, err := d.vbmOutErr("showvminfo", d.MachineName, "--machinereadable") - if err != nil { - if reMachineNotFound.FindString(stderr) != "" { - return state.Error, ErrMachineNotExist - } - return state.Error, err - } - re := regexp.MustCompile(`(?m)^VMState="(\w+)"`) - groups := re.FindStringSubmatch(stdout) - if len(groups) < 1 { - return state.None, nil - } - switch groups[1] { - case "running": - return state.Running, nil - case "paused": - return state.Paused, nil - case "saved": - return state.Saved, nil - case "poweroff", "aborted": - return state.Stopped, nil - } - return state.None, nil -} - -func (d *Driver) getHostOnlyMACAddress() (string, error) { - // Return the MAC address of the host-only adapter - // assigned to this machine. The returned address - // is lower-cased and does not contain colons. - - stdout, stderr, err := d.vbmOutErr("showvminfo", d.MachineName, "--machinereadable") - if err != nil { - if reMachineNotFound.FindString(stderr) != "" { - return "", ErrMachineNotExist - } - return "", err - } - - // First, we get the number of the host-only interface - re := regexp.MustCompile(`(?m)^hostonlyadapter([\d]+)`) - groups := re.FindStringSubmatch(stdout) - if len(groups) < 2 { - return "", errors.New("Machine does not have a host-only adapter") - } - - // Then we grab the MAC address based on that number - adapterNumber := groups[1] - re = regexp.MustCompile(fmt.Sprintf("(?m)^macaddress%s=\"(.*)\"", adapterNumber)) - groups = re.FindStringSubmatch(stdout) - if len(groups) < 2 { - return "", fmt.Errorf("Could not find MAC address for adapter %v", adapterNumber) - } - - return strings.ToLower(groups[1]), nil -} - -func (d *Driver) parseIPForMACFromIPAddr(ipAddrOutput string, macAddress string) (string, error) { - // Given the output of "ip addr show" on the VM, return the IPv4 address - // of the interface with the given MAC address. 
- - lines := strings.Split(ipAddrOutput, "\n") - returnNextIP := false - - for _, line := range lines { - line = strings.TrimSpace(line) - - if strings.HasPrefix(line, "link") { // line contains MAC address - vals := strings.Split(line, " ") - if len(vals) >= 2 { - macBlock := vals[1] - macWithoutColons := strings.Replace(macBlock, ":", "", -1) - if macWithoutColons == macAddress { // we are in the correct device block - returnNextIP = true - } - } - } else if strings.HasPrefix(line, "inet") && !strings.HasPrefix(line, "inet6") && returnNextIP { - vals := strings.Split(line, " ") - if len(vals) >= 2 { - return vals[1][:strings.Index(vals[1], "/")], nil - } - } - } - - return "", fmt.Errorf("Could not find matching IP for MAC address %v", macAddress) -} - -func (d *Driver) GetIP() (string, error) { - // DHCP is used to get the IP, so virtualbox hosts don't have IPs unless - // they are running - s, err := d.GetState() - if err != nil { - return "", err - } - if s != state.Running { - return "", drivers.ErrHostIsNotRunning - } - - macAddress, err := d.getHostOnlyMACAddress() - if err != nil { - return "", err - } - - log.Debugf("Host-only MAC: %s\n", macAddress) - - output, err := drivers.RunSSHCommandFromDriver(d, "ip addr show") - if err != nil { - return "", err - } - - log.Debugf("SSH returned: %s\nEND SSH\n", output) - - ipAddress, err := d.parseIPForMACFromIPAddr(output, macAddress) - if err != nil { - return "", err - } - - return ipAddress, nil -} - -func (d *Driver) publicSSHKeyPath() string { - return d.GetSSHKeyPath() + ".pub" -} - -func (d *Driver) diskPath() string { - return d.ResolveStorePath("disk.vmdk") -} - -func (d *Driver) setupHostOnlyNetwork(machineName string) (*hostOnlyNetwork, error) { - hostOnlyCIDR := d.HostOnlyCIDR - - // This is to assist in migrating from version 0.2 to 0.3 format - // it should be removed in a later release - if hostOnlyCIDR == "" { - hostOnlyCIDR = defaultHostOnlyCIDR - } - - ip, network, err := parseAndValidateCIDR(hostOnlyCIDR) - if err != nil { - return nil, err - } - - nets, err := listHostOnlyAdapters(d.VBoxManager) - if err != nil { - return nil, err - } - - err = validateNoIPCollisions(d.HostInterfaces, network, nets) - if err != nil { - return nil, err - } - - log.Debugf("Searching for hostonly interface for IPv4: %s and Mask: %s", ip, network.Mask) - hostOnlyAdapter, err := getOrCreateHostOnlyNetwork(ip, network.Mask, nets, d.VBoxManager) - if err != nil { - return nil, err - } - - if err := removeOrphanDHCPServers(d.VBoxManager); err != nil { - return nil, err - } - - dhcpAddr, err := getRandomIPinSubnet(d, ip) - if err != nil { - return nil, err - } - - lowerIP, upperIP := getDHCPAddressRange(dhcpAddr, network) - - log.Debugf("Adding/Modifying DHCP server %q with address range %q - %q...", dhcpAddr, lowerIP, upperIP) - - dhcp := dhcpServer{} - dhcp.IPv4.IP = dhcpAddr - dhcp.IPv4.Mask = network.Mask - dhcp.LowerIP = lowerIP - dhcp.UpperIP = upperIP - dhcp.Enabled = !d.HostOnlyNoDHCP - if err := addHostOnlyDHCPServer(hostOnlyAdapter.Name, dhcp, d.VBoxManager); err != nil { - return nil, err - } - - if err := d.vbm("modifyvm", machineName, - "--nic2", "hostonly", - "--nictype2", d.HostOnlyNicType, - "--nicpromisc2", d.HostOnlyPromiscMode, - "--hostonlyadapter2", hostOnlyAdapter.Name, - "--cableconnected2", "on"); err != nil { - return nil, err - } - - return hostOnlyAdapter, nil -} - -func getDHCPAddressRange(dhcpAddr net.IP, network *net.IPNet) (lowerIP net.IP, upperIP net.IP) { - nAddr := network.IP.To4() - ones, bits := 
network.Mask.Size() - - if ones <= 24 { - // For a /24 subnet, use the original behavior of allowing the address range - // between x.x.x.100 and x.x.x.254. - lowerIP = net.IPv4(nAddr[0], nAddr[1], nAddr[2], byte(100)) - upperIP = net.IPv4(nAddr[0], nAddr[1], nAddr[2], byte(254)) - return - } - - // Start the lowerIP range one address above the selected DHCP address. - lowerIP = net.IPv4(nAddr[0], nAddr[1], nAddr[2], dhcpAddr.To4()[3]+1) - - // The highest D-part of the address A.B.C.D in this subnet is at 2^n - 1, - // where n is the number of available bits in the subnet. Since the highest - // address is reserved for subnet broadcast, the highest *assignable* address - // is at (2^n - 1) - 1 == 2^n - 2. - maxAssignableSubnetAddress := (byte)((1 << (uint)(bits-ones)) - 2) - upperIP = net.IPv4(nAddr[0], nAddr[1], nAddr[2], maxAssignableSubnetAddress) - return -} - -func parseAndValidateCIDR(hostOnlyCIDR string) (net.IP, *net.IPNet, error) { - ip, network, err := net.ParseCIDR(hostOnlyCIDR) - if err != nil { - return nil, nil, err - } - - networkAddress := network.IP.To4() - if ip.Equal(networkAddress) { - return nil, nil, ErrNetworkAddrCidr - } - - return ip, network, nil -} - -// validateNoIPCollisions ensures no conflicts between the host's network interfaces and the vbox host-only network that -// will be used for machine vm instances. -func validateNoIPCollisions(hif HostInterfaces, hostOnlyNet *net.IPNet, currHostOnlyNets map[string]*hostOnlyNetwork) error { - hostOnlyByCIDR := map[string]*hostOnlyNetwork{} - //listHostOnlyAdapters returns a map w/ virtualbox net names as key. Rekey to CIDRs - for _, n := range currHostOnlyNets { - ipnet := net.IPNet{IP: n.IPv4.IP, Mask: n.IPv4.Mask} - hostOnlyByCIDR[ipnet.String()] = n - } - - m, err := listHostInterfaces(hif, hostOnlyByCIDR) - if err != nil { - return err - } - - collision, err := checkIPNetCollision(hostOnlyNet, m) - if err != nil { - return err - } - - if collision { - return ErrNetworkAddrCollision - } - return nil -} - -// Select an available port, trying the specified -// port first, falling back on an OS selected port. -func getAvailableTCPPort(port int) (int, error) { - for i := 0; i <= 10; i++ { - ln, err := net.Listen("tcp4", fmt.Sprintf("127.0.0.1:%d", port)) - if err != nil { - return 0, err - } - defer ln.Close() - addr := ln.Addr().String() - addrParts := strings.SplitN(addr, ":", 2) - p, err := strconv.Atoi(addrParts[1]) - if err != nil { - return 0, err - } - if p != 0 { - port = p - return port, nil - } - port = 0 // Throw away the port hint before trying again - time.Sleep(1) - } - return 0, fmt.Errorf("unable to allocate tcp port") -} - -// Setup a NAT port forwarding entry. 
-func setPortForwarding(d *Driver, interfaceNum int, mapName, protocol string, guestPort, desiredHostPort int) (int, error) { - actualHostPort, err := getAvailableTCPPort(desiredHostPort) - if err != nil { - return -1, err - } - if desiredHostPort != actualHostPort && desiredHostPort != 0 { - log.Debugf("NAT forwarding host port for guest port %d (%s) changed from %d to %d", - guestPort, mapName, desiredHostPort, actualHostPort) - } - cmd := fmt.Sprintf("--natpf%d", interfaceNum) - d.vbm("modifyvm", d.MachineName, cmd, "delete", mapName) - if err := d.vbm("modifyvm", d.MachineName, - cmd, fmt.Sprintf("%s,%s,127.0.0.1,%d,,%d", mapName, protocol, actualHostPort, guestPort)); err != nil { - return -1, err - } - return actualHostPort, nil -} - -// getRandomIPinSubnet returns a pseudo-random net.IP in the same -// subnet as the IP passed -func getRandomIPinSubnet(d *Driver, baseIP net.IP) (net.IP, error) { - var dhcpAddr net.IP - - nAddr := baseIP.To4() - // select pseudo-random DHCP addr; make sure not to clash with the host - // only try 5 times and bail if no random received - for i := 0; i < 5; i++ { - n := d.randomInter.RandomInt(24) + 1 - if byte(n) != nAddr[3] { - dhcpAddr = net.IPv4(nAddr[0], nAddr[1], nAddr[2], byte(n)) - break - } - } - - if dhcpAddr == nil { - return nil, ErrUnableToGenerateRandomIP - } - - return dhcpAddr, nil -} - -func detectVBoxManageCmdInPath() string { - cmd := "VBoxManage" - if path, err := exec.LookPath(cmd); err == nil { - return path - } - return cmd -} - -func (d *Driver) readVBoxLog() ([]string, error) { - logPath := filepath.Join(d.ResolveStorePath(d.MachineName), "Logs", "VBox.log") - log.Debugf("Checking vm logs: %s", logPath) - - return d.logsReader.Read(logPath) -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_darwin.go b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_darwin.go deleted file mode 100644 index 0c225bde6d8e..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -package virtualbox - -func detectVBoxManageCmd() string { - return detectVBoxManageCmdInPath() -} - -func getShareDriveAndName() (string, string) { - return "Users", "/Users" -} - -func isHyperVInstalled() bool { - return false -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_freebsd.go b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_freebsd.go deleted file mode 100644 index f41429e2ec61..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package virtualbox - -func detectVBoxManageCmd() string { - return detectVBoxManageCmdInPath() -} - -func getShareDriveAndName() (string, string) { - return "hosthome", "/home" -} - -func isHyperVInstalled() bool { - return false -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_linux.go b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_linux.go deleted file mode 100644 index f41429e2ec61..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package virtualbox - -func detectVBoxManageCmd() string { - return detectVBoxManageCmdInPath() -} - -func getShareDriveAndName() (string, string) { - return "hosthome", "/home" -} - -func isHyperVInstalled() bool { - return false -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_openbsd.go 
b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_openbsd.go deleted file mode 100644 index f41429e2ec61..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_openbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package virtualbox - -func detectVBoxManageCmd() string { - return detectVBoxManageCmdInPath() -} - -func getShareDriveAndName() (string, string) { - return "hosthome", "/home" -} - -func isHyperVInstalled() bool { - return false -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_windows.go b/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_windows.go deleted file mode 100644 index ff3fe986ab29..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/virtualbox_windows.go +++ /dev/null @@ -1,105 +0,0 @@ -package virtualbox - -import ( - "strings" - - "fmt" - "os" - "os/exec" - "path/filepath" - - "github.com/docker/machine/libmachine/log" - "golang.org/x/sys/windows/registry" -) - -// cmdOutput runs a shell command and returns its output. -func cmdOutput(name string, args ...string) (string, error) { - cmd := exec.Command(name, args...) - log.Debugf("COMMAND: %v %v", name, strings.Join(args, " ")) - - stdout, err := cmd.Output() - if err != nil { - return "", err - } - - log.Debugf("STDOUT:\n{\n%v}", string(stdout)) - - return string(stdout), nil -} - -func detectVBoxManageCmd() string { - cmd := "VBoxManage" - if p := os.Getenv("VBOX_INSTALL_PATH"); p != "" { - if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil { - return path - } - } - - if p := os.Getenv("VBOX_MSI_INSTALL_PATH"); p != "" { - if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil { - return path - } - } - - // Look in default installation path for VirtualBox version > 5 - if path, err := exec.LookPath(filepath.Join("C:\\Program Files\\Oracle\\VirtualBox", cmd)); err == nil { - return path - } - - // Look in windows registry - if p, err := findVBoxInstallDirInRegistry(); err == nil { - if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil { - return path - } - } - - return detectVBoxManageCmdInPath() //fallback to path -} - -func findVBoxInstallDirInRegistry() (string, error) { - registryKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Oracle\VirtualBox`, registry.QUERY_VALUE) - if err != nil { - errorMessage := fmt.Sprintf("Can't find VirtualBox registry entries, is VirtualBox really installed properly? %s", err) - log.Debugf(errorMessage) - return "", fmt.Errorf(errorMessage) - } - - defer registryKey.Close() - - installDir, _, err := registryKey.GetStringValue("InstallDir") - if err != nil { - errorMessage := fmt.Sprintf("Can't find InstallDir registry key within VirtualBox registries entries, is VirtualBox really installed properly? %s", err) - log.Debugf(errorMessage) - return "", fmt.Errorf(errorMessage) - } - - return installDir, nil -} - -func getShareDriveAndName() (string, string) { - return "c/Users", "\\\\?\\c:\\Users" -} - -func isHyperVInstalled() bool { - // check if hyper-v is installed - _, err := exec.LookPath("vmms.exe") - if err != nil { - errmsg := "Hyper-V is not installed." - log.Debugf(errmsg, err) - return false - } - - // check to see if a hypervisor is present. if hyper-v is installed and enabled, - // display an error explaining the incompatibility between virtualbox and hyper-v. - output, err := cmdOutput("wmic", "computersystem", "get", "hypervisorpresent") - - if err != nil { - errmsg := "Could not check to see if Hyper-V is running." 
- log.Debugf(errmsg, err) - return false - } - - enabled := strings.Contains(output, "TRUE") - return enabled - -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/vm.go b/vendor/github.com/docker/machine/drivers/virtualbox/vm.go deleted file mode 100644 index cd95ada76727..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/vm.go +++ /dev/null @@ -1,41 +0,0 @@ -package virtualbox - -import "strconv" - -type VM struct { - CPUs int - Memory int -} - -func getVMInfo(name string, vbox VBoxManager) (*VM, error) { - out, err := vbox.vbmOut("showvminfo", name, "--machinereadable") - if err != nil { - return nil, err - } - - vm := &VM{} - - err = parseKeyValues(out, reEqualLine, func(key, val string) error { - switch key { - case "cpus": - v, err := strconv.Atoi(val) - if err != nil { - return err - } - vm.CPUs = v - case "memory": - v, err := strconv.Atoi(val) - if err != nil { - return err - } - vm.Memory = v - } - - return nil - }) - if err != nil { - return nil, err - } - - return vm, nil -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/vtx.go b/vendor/github.com/docker/machine/drivers/virtualbox/vtx.go deleted file mode 100644 index 7adb631fe965..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/vtx.go +++ /dev/null @@ -1,28 +0,0 @@ -package virtualbox - -import "strings" - -// IsVTXDisabledInTheVM checks if VT-X is disabled in the started vm. -func (d *Driver) IsVTXDisabledInTheVM() (bool, error) { - lines, err := d.readVBoxLog() - if err != nil { - return true, err - } - - for _, line := range lines { - if strings.Contains(line, "VT-x is disabled") && !strings.Contains(line, "Falling back to raw-mode: VT-x is disabled in the BIOS for all CPU modes") { - return true, nil - } - if strings.Contains(line, "the host CPU does NOT support HW virtualization") { - return true, nil - } - if strings.Contains(line, "VERR_VMX_UNABLE_TO_START_VM") { - return true, nil - } - if strings.Contains(line, "Power up failed") && strings.Contains(line, "VERR_VMX_NO_VMX") { - return true, nil - } - } - - return false, nil -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/vtx_intel.go b/vendor/github.com/docker/machine/drivers/virtualbox/vtx_intel.go deleted file mode 100644 index 852f451d1379..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/vtx_intel.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build 386 amd64 - -package virtualbox - -import "github.com/intel-go/cpuid" - -// IsVTXDisabled checks if VT-x is disabled in the CPU. -func (d *Driver) IsVTXDisabled() bool { - if cpuid.HasFeature(cpuid.VMX) || cpuid.HasExtraFeature(cpuid.SVM) { - return false - } - - return true -} diff --git a/vendor/github.com/docker/machine/drivers/virtualbox/vtx_other.go b/vendor/github.com/docker/machine/drivers/virtualbox/vtx_other.go deleted file mode 100644 index 5dbfd17553b1..000000000000 --- a/vendor/github.com/docker/machine/drivers/virtualbox/vtx_other.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !386,!amd64 - -package virtualbox - -// IsVTXDisabled checks if VT-x is disabled in the CPU. 
-func (d *Driver) IsVTXDisabled() bool { - return true -} diff --git a/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion.go b/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion.go deleted file mode 100644 index be433e5fe7d5..000000000000 --- a/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !darwin - -package vmwarefusion - -import "github.com/docker/machine/libmachine/drivers" - -func NewDriver(hostName, storePath string) drivers.Driver { - return drivers.NewDriverNotSupported("vmwarefusion", hostName, storePath) -} diff --git a/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion_darwin.go b/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion_darwin.go deleted file mode 100644 index 1211304e31e5..000000000000 --- a/vendor/github.com/docker/machine/drivers/vmwarefusion/fusion_darwin.go +++ /dev/null @@ -1,785 +0,0 @@ -/* - * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. - */ - -package vmwarefusion - -import ( - "archive/tar" - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "regexp" - "runtime" - "strings" - "text/template" - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" - cryptossh "golang.org/x/crypto/ssh" -) - -const ( - B2DUser = "docker" - B2DPass = "tcuser" - isoFilename = "boot2docker.iso" - isoConfigDrive = "configdrive.iso" -) - -// Driver for VMware Fusion -type Driver struct { - *drivers.BaseDriver - Memory int - DiskSize int - CPU int - ISO string - Boot2DockerURL string - - SSHPassword string - ConfigDriveISO string - ConfigDriveURL string - NoShare bool -} - -const ( - defaultSSHUser = B2DUser - defaultSSHPass = B2DPass - defaultDiskSize = 20000 - defaultCPU = 1 - defaultMemory = 1024 -) - -// GetCreateFlags registers the flags this driver adds to -// "docker hosts create" -func (d *Driver) GetCreateFlags() []mcnflag.Flag { - return []mcnflag.Flag{ - mcnflag.StringFlag{ - EnvVar: "FUSION_BOOT2DOCKER_URL", - Name: "vmwarefusion-boot2docker-url", - Usage: "Fusion URL for boot2docker image", - Value: "", - }, - mcnflag.StringFlag{ - EnvVar: "FUSION_CONFIGDRIVE_URL", - Name: "vmwarefusion-configdrive-url", - Usage: "Fusion URL for cloud-init configdrive", - Value: "", - }, - mcnflag.IntFlag{ - EnvVar: "FUSION_CPU_COUNT", - Name: "vmwarefusion-cpu-count", - Usage: "number of CPUs for the machine (-1 to use the number of CPUs available)", - Value: defaultCPU, - }, - mcnflag.IntFlag{ - EnvVar: "FUSION_MEMORY_SIZE", - Name: "vmwarefusion-memory-size", - Usage: "Fusion size of memory for host VM (in MB)", - Value: defaultMemory, - }, - mcnflag.IntFlag{ - EnvVar: "FUSION_DISK_SIZE", - Name: "vmwarefusion-disk-size", - Usage: "Fusion size of disk for host VM (in MB)", - Value: defaultDiskSize, - }, - mcnflag.StringFlag{ - EnvVar: "FUSION_SSH_USER", - Name: "vmwarefusion-ssh-user", - Usage: "SSH user", - Value: defaultSSHUser, - }, - mcnflag.StringFlag{ - EnvVar: "FUSION_SSH_PASSWORD", - Name: "vmwarefusion-ssh-password", - Usage: "SSH password", - Value: defaultSSHPass, - }, - mcnflag.BoolFlag{ - EnvVar: "FUSION_NO_SHARE", - Name: "vmwarefusion-no-share", - Usage: "Disable the mount of your home directory", - }, - } -} - -func NewDriver(hostName, storePath string) drivers.Driver { - return 
&Driver{ - CPU: defaultCPU, - Memory: defaultMemory, - DiskSize: defaultDiskSize, - SSHPassword: defaultSSHPass, - BaseDriver: &drivers.BaseDriver{ - SSHUser: defaultSSHUser, - MachineName: hostName, - StorePath: storePath, - }, - } -} - -func (d *Driver) GetSSHHostname() (string, error) { - return d.GetIP() -} - -func (d *Driver) GetSSHUsername() string { - if d.SSHUser == "" { - d.SSHUser = "docker" - } - - return d.SSHUser -} - -// DriverName returns the name of the driver -func (d *Driver) DriverName() string { - return "vmwarefusion" -} - -func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { - d.Memory = flags.Int("vmwarefusion-memory-size") - d.CPU = flags.Int("vmwarefusion-cpu-count") - d.DiskSize = flags.Int("vmwarefusion-disk-size") - d.Boot2DockerURL = flags.String("vmwarefusion-boot2docker-url") - d.ConfigDriveURL = flags.String("vmwarefusion-configdrive-url") - d.ISO = d.ResolveStorePath(isoFilename) - d.ConfigDriveISO = d.ResolveStorePath(isoConfigDrive) - d.SetSwarmConfigFromFlags(flags) - d.SSHUser = flags.String("vmwarefusion-ssh-user") - d.SSHPassword = flags.String("vmwarefusion-ssh-password") - d.SSHPort = 22 - d.NoShare = flags.Bool("vmwarefusion-no-share") - - // We support a maximum of 16 cpu to be consistent with Virtual Hardware 10 - // specs. - if d.CPU < 1 { - d.CPU = int(runtime.NumCPU()) - } - if d.CPU > 16 { - d.CPU = 16 - } - - return nil -} - -func (d *Driver) GetURL() (string, error) { - ip, err := d.GetIP() - if err != nil { - return "", err - } - if ip == "" { - return "", nil - } - return fmt.Sprintf("tcp://%s", net.JoinHostPort(ip, "2376")), nil -} - -func (d *Driver) GetIP() (string, error) { - s, err := d.GetState() - if err != nil { - return "", err - } - if s != state.Running { - return "", drivers.ErrHostIsNotRunning - } - - // determine MAC address for VM - macaddr, err := d.getMacAddressFromVmx() - if err != nil { - return "", err - } - - // attempt to find the address in the vmnet configuration - if ip, err := d.getIPfromVmnetConfiguration(macaddr); err == nil { - return ip, err - } - - // address not found in vmnet so look for a DHCP lease - ip, err := d.getIPfromDHCPLease(macaddr) - if err != nil { - return "", err - } - - return ip, nil -} - -func (d *Driver) GetState() (state.State, error) { - // VMRUN only tells use if the vm is running or not - vmxp, err := filepath.EvalSymlinks(d.vmxPath()) - if err != nil { - return state.Error, err - } - if stdout, _, _ := vmrun("list"); strings.Contains(stdout, vmxp) { - return state.Running, nil - } - return state.Stopped, nil -} - -// PreCreateCheck checks that the machine creation process can be started safely. -func (d *Driver) PreCreateCheck() error { - // Downloading boot2docker to cache should be done here to make sure - // that a download failure will not leave a machine half created. 
- b2dutils := mcnutils.NewB2dUtils(d.StorePath) - - return b2dutils.UpdateISOCache(d.Boot2DockerURL) -} - -func (d *Driver) Create() error { - b2dutils := mcnutils.NewB2dUtils(d.StorePath) - if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil { - return err - } - - // download cloud-init config drive - if d.ConfigDriveURL != "" { - if err := b2dutils.DownloadISO(d.ResolveStorePath("."), isoConfigDrive, d.ConfigDriveURL); err != nil { - return err - } - } - - log.Infof("Creating SSH key...") - if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil { - return err - } - - log.Infof("Creating VM...") - if err := os.MkdirAll(d.ResolveStorePath("."), 0755); err != nil { - return err - } - - if _, err := os.Stat(d.vmxPath()); err == nil { - return ErrMachineExist - } - - // Generate vmx config file from template - vmxt := template.Must(template.New("vmx").Parse(vmx)) - vmxfile, err := os.Create(d.vmxPath()) - if err != nil { - return err - } - vmxt.Execute(vmxfile, d) - - // Generate vmdk file - diskImg := d.ResolveStorePath(fmt.Sprintf("%s.vmdk", d.MachineName)) - if _, err := os.Stat(diskImg); err != nil { - if !os.IsNotExist(err) { - return err - } - - if err := vdiskmanager(diskImg, d.DiskSize); err != nil { - return err - } - } - - log.Infof("Starting %s...", d.MachineName) - _, _, err = vmrun("start", d.vmxPath(), "nogui") - if err != nil { - return err - } - - var ip string - - log.Infof("Waiting for VM to come online...") - for i := 1; i <= 60; i++ { - ip, err = d.GetIP() - if err != nil { - log.Debugf("Not there yet %d/%d, error: %s", i, 60, err) - time.Sleep(2 * time.Second) - continue - } - - if ip != "" { - log.Debugf("Got an ip: %s", ip) - conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, 22), time.Duration(2*time.Second)) - if err != nil { - log.Debugf("SSH Daemon not responding yet: %s", err) - time.Sleep(2 * time.Second) - continue - } - conn.Close() - break - } - } - - if ip == "" { - return fmt.Errorf("Machine didn't return an IP after 120 seconds, aborting") - } - - // we got an IP, let's copy ssh keys over - d.IPAddress = ip - - // Do not execute the rest of boot2docker specific configuration - // The upload of the public ssh key uses a ssh connection, - // this works without installed vmware client tools - if d.ConfigDriveURL != "" { - var keyfh *os.File - var keycontent []byte - - log.Infof("Copy public SSH key to %s [%s]", d.MachineName, d.IPAddress) - - // create .ssh folder in users home - if err := executeSSHCommand(fmt.Sprintf("mkdir -p /home/%s/.ssh", d.SSHUser), d); err != nil { - return err - } - - // read generated public ssh key - if keyfh, err = os.Open(d.publicSSHKeyPath()); err != nil { - return err - } - defer keyfh.Close() - - if keycontent, err = ioutil.ReadAll(keyfh); err != nil { - return err - } - - // add public ssh key to authorized_keys - if err := executeSSHCommand(fmt.Sprintf("echo '%s' > /home/%s/.ssh/authorized_keys", string(keycontent), d.SSHUser), d); err != nil { - return err - } - - // make it secure - if err := executeSSHCommand(fmt.Sprintf("chmod 600 /home/%s/.ssh/authorized_keys", d.SSHUser), d); err != nil { - return err - } - - log.Debugf("Leaving create sequence early, configdrive found") - return nil - } - - // Generate a tar keys bundle - if err := d.generateKeyBundle(); err != nil { - return err - } - - // Test if /var/lib/boot2docker exists - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "directoryExistsInGuest", d.vmxPath(), "/var/lib/boot2docker") - if err != nil { - return err - } 
- - // Copy SSH keys bundle - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "CopyFileFromHostToGuest", d.vmxPath(), d.ResolveStorePath("userdata.tar"), "/home/docker/userdata.tar") - if err != nil { - return err - } - - // Expand tar file. - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "runScriptInGuest", d.vmxPath(), "/bin/sh", "sudo sh -c \"tar xvf /home/docker/userdata.tar -C /home/docker > /var/log/userdata.log 2>&1 && chown -R docker:staff /home/docker\"") - if err != nil { - return err - } - - // copy to /var/lib/boot2docker - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "runScriptInGuest", d.vmxPath(), "/bin/sh", "sudo /bin/mv /home/docker/userdata.tar /var/lib/boot2docker/userdata.tar") - if err != nil { - return err - } - - // Enable Shared Folders - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "enableSharedFolders", d.vmxPath()) - if err != nil { - return err - } - - var shareName, shareDir string // TODO configurable at some point - switch runtime.GOOS { - case "darwin": - shareName = "Users" - shareDir = "/Users" - // TODO "linux" and "windows" - } - - if shareDir != "" && !d.NoShare { - if _, err := os.Stat(shareDir); err != nil && !os.IsNotExist(err) { - return err - } else if !os.IsNotExist(err) { - // add shared folder, create mountpoint and mount it. - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "addSharedFolder", d.vmxPath(), shareName, shareDir) - if err != nil { - return err - } - command := "([ ! -d " + shareDir + " ]&& sudo mkdir " + shareDir + "; sudo mount --bind /mnt/hgfs/" + shareDir + " " + shareDir + ") || ([ -f /usr/local/bin/vmhgfs-fuse ]&& sudo /usr/local/bin/vmhgfs-fuse -o allow_other .host:/" + shareName + " " + shareDir + ") || sudo mount -t vmhgfs -o uid=$(id -u),gid=$(id -g) .host:/" + shareName + " " + shareDir - _, _, err = vmrun("-gu", B2DUser, "-gp", B2DPass, "runScriptInGuest", d.vmxPath(), "/bin/sh", command) - if err != nil { - return err - } - } - } - return nil -} - -func (d *Driver) Start() error { - vmrun("start", d.vmxPath(), "nogui") - - // Do not execute the rest of boot2docker specific configuration, exit here - if d.ConfigDriveURL != "" { - log.Debugf("Leaving start sequence early, configdrive found") - return nil - } - - log.Debugf("Mounting Shared Folders...") - var shareName, shareDir string // TODO configurable at some point - switch runtime.GOOS { - case "darwin": - shareName = "Users" - shareDir = "/Users" - // TODO "linux" and "windows" - } - - if shareDir != "" { - if _, err := os.Stat(shareDir); err != nil && !os.IsNotExist(err) { - return err - } else if !os.IsNotExist(err) { - // create mountpoint and mount shared folder - command := "([ ! 
-d " + shareDir + " ]&& sudo mkdir " + shareDir + "; sudo mount --bind /mnt/hgfs/" + shareDir + " " + shareDir + ") || ([ -f /usr/local/bin/vmhgfs-fuse ]&& sudo /usr/local/bin/vmhgfs-fuse -o allow_other .host:/" + shareName + " " + shareDir + ") || sudo mount -t vmhgfs -o uid=$(id -u),gid=$(id -g) .host:/" + shareName + " " + shareDir - vmrun("-gu", B2DUser, "-gp", B2DPass, "runScriptInGuest", d.vmxPath(), "/bin/sh", command) - } - } - - return nil -} - -func (d *Driver) Stop() error { - _, _, err := vmrun("stop", d.vmxPath(), "nogui") - return err -} - -func (d *Driver) Restart() error { - // Stop VM gracefully - if err := d.Stop(); err != nil { - return err - } - // Start it again and mount shared folder - return d.Start() -} - -func (d *Driver) Kill() error { - _, _, err := vmrun("stop", d.vmxPath(), "hard nogui") - return err -} - -func (d *Driver) Remove() error { - s, _ := d.GetState() - if s == state.Running { - if err := d.Kill(); err != nil { - return fmt.Errorf("Error stopping VM before deletion") - } - } - log.Infof("Deleting %s...", d.MachineName) - vmrun("deleteVM", d.vmxPath(), "nogui") - return nil -} - -func (d *Driver) Upgrade() error { - return fmt.Errorf("VMware Fusion does not currently support the upgrade operation") -} - -func (d *Driver) vmxPath() string { - return d.ResolveStorePath(fmt.Sprintf("%s.vmx", d.MachineName)) -} - -func (d *Driver) vmdkPath() string { - return d.ResolveStorePath(fmt.Sprintf("%s.vmdk", d.MachineName)) -} - -func (d *Driver) getMacAddressFromVmx() (string, error) { - var vmxfh *os.File - var vmxcontent []byte - var err error - - if vmxfh, err = os.Open(d.vmxPath()); err != nil { - return "", err - } - defer vmxfh.Close() - - if vmxcontent, err = ioutil.ReadAll(vmxfh); err != nil { - return "", err - } - - // Look for generatedAddress as we're passing a VMX with addressType = "generated". - var macaddr string - vmxparse := regexp.MustCompile(`^ethernet0.generatedAddress\s*=\s*"(.*?)"\s*$`) - for _, line := range strings.Split(string(vmxcontent), "\n") { - if matches := vmxparse.FindStringSubmatch(line); matches == nil { - continue - } else { - macaddr = strings.ToLower(matches[1]) - } - } - - if macaddr == "" { - return "", fmt.Errorf("couldn't find MAC address in VMX file %s", d.vmxPath()) - } - - log.Debugf("MAC address in VMX: %s", macaddr) - - return macaddr, nil -} - -func (d *Driver) getIPfromVmnetConfiguration(macaddr string) (string, error) { - - // DHCP lease table for NAT vmnet interface - confFiles, _ := filepath.Glob("/Library/Preferences/VMware Fusion/vmnet*/dhcpd.conf") - for _, conffile := range confFiles { - log.Debugf("Trying to find IP address in configuration file: %s", conffile) - if ipaddr, err := d.getIPfromVmnetConfigurationFile(conffile, macaddr); err == nil { - return ipaddr, err - } - } - - return "", fmt.Errorf("IP not found for MAC %s in vmnet configuration files", macaddr) -} - -func (d *Driver) getIPfromVmnetConfigurationFile(conffile, macaddr string) (string, error) { - var conffh *os.File - var confcontent []byte - - var currentip string - var lastipmatch string - var lastmacmatch string - - var err error - - if conffh, err = os.Open(conffile); err != nil { - return "", err - } - defer conffh.Close() - - if confcontent, err = ioutil.ReadAll(conffh); err != nil { - return "", err - } - - // find all occurrences of 'host .* { .. 
}' and extract - // out of the inner block the MAC and IP addresses - - // key = MAC, value = IP - m := make(map[string]string) - - // Begin of a host block, that contains the IP, MAC - hostbegin := regexp.MustCompile(`^host (.+?) {`) - // End of a host block - hostend := regexp.MustCompile(`^}`) - - // Get the IP address. - ip := regexp.MustCompile(`^\s*fixed-address (.+?);$`) - // Get the MAC address associated. - mac := regexp.MustCompile(`^\s*hardware ethernet (.+?);$`) - - // we use a block depth so that just in case inner blocks exists - // we are not being fooled by them - blockdepth := 0 - for _, line := range strings.Split(string(confcontent), "\n") { - - if matches := hostbegin.FindStringSubmatch(line); matches != nil { - blockdepth = blockdepth + 1 - continue - } - - // we are only in interested in endings if we in a block. Otherwise we will count - // ending of non host blocks as well - if matches := hostend.FindStringSubmatch(line); blockdepth > 0 && matches != nil { - blockdepth = blockdepth - 1 - - if blockdepth == 0 { - // add data - m[lastmacmatch] = lastipmatch - - // reset all temp var holders - lastipmatch = "" - lastmacmatch = "" - } - - continue - } - - // only if we are within the first level of a block - // we are looking for addresses to extract - if blockdepth == 1 { - if matches := ip.FindStringSubmatch(line); matches != nil { - lastipmatch = matches[1] - continue - } - - if matches := mac.FindStringSubmatch(line); matches != nil { - lastmacmatch = strings.ToLower(matches[1]) - continue - } - } - } - - log.Debugf("Following IPs found %s", m) - - // map is filled to now lets check if we have a MAC associated to an IP - currentip, ok := m[strings.ToLower(macaddr)] - - if !ok { - return "", fmt.Errorf("IP not found for MAC %s in vmnet configuration", macaddr) - } - - log.Debugf("IP found in vmnet configuration file: %s", currentip) - - return currentip, nil - -} - -func (d *Driver) getIPfromDHCPLease(macaddr string) (string, error) { - - // DHCP lease table for NAT vmnet interface - leasesFiles, _ := filepath.Glob("/var/db/vmware/*.leases") - for _, dhcpfile := range leasesFiles { - log.Debugf("Trying to find IP address in leases file: %s", dhcpfile) - if ipaddr, err := d.getIPfromDHCPLeaseFile(dhcpfile, macaddr); err == nil { - return ipaddr, err - } - } - - return "", fmt.Errorf("IP not found for MAC %s in DHCP leases", macaddr) -} - -func (d *Driver) getIPfromDHCPLeaseFile(dhcpfile, macaddr string) (string, error) { - - var dhcpfh *os.File - var dhcpcontent []byte - var lastipmatch string - var currentip string - var lastleaseendtime time.Time - var currentleadeendtime time.Time - var err error - - if dhcpfh, err = os.Open(dhcpfile); err != nil { - return "", err - } - defer dhcpfh.Close() - - if dhcpcontent, err = ioutil.ReadAll(dhcpfh); err != nil { - return "", err - } - - // Get the IP from the lease table. - leaseip := regexp.MustCompile(`^lease (.+?) {$`) - // Get the lease end date time. - leaseend := regexp.MustCompile(`^\s*ends \d (.+?);$`) - // Get the MAC address associated. 
- leasemac := regexp.MustCompile(`^\s*hardware ethernet (.+?);$`) - - for _, line := range strings.Split(string(dhcpcontent), "\n") { - - if matches := leaseip.FindStringSubmatch(line); matches != nil { - lastipmatch = matches[1] - continue - } - - if matches := leaseend.FindStringSubmatch(line); matches != nil { - lastleaseendtime, _ = time.Parse("2006/01/02 15:04:05", matches[1]) - continue - } - - if matches := leasemac.FindStringSubmatch(line); matches != nil && matches[1] == macaddr && currentleadeendtime.Before(lastleaseendtime) { - currentip = lastipmatch - currentleadeendtime = lastleaseendtime - } - } - - if currentip == "" { - return "", fmt.Errorf("IP not found for MAC %s in DHCP leases", macaddr) - } - - log.Debugf("IP found in DHCP lease table: %s", currentip) - - return currentip, nil -} - -func (d *Driver) publicSSHKeyPath() string { - return d.GetSSHKeyPath() + ".pub" -} - -// Make a boot2docker userdata.tar key bundle -func (d *Driver) generateKeyBundle() error { - log.Debugf("Creating Tar key bundle...") - - magicString := "boot2docker, this is vmware speaking" - - tf, err := os.Create(d.ResolveStorePath("userdata.tar")) - if err != nil { - return err - } - defer tf.Close() - var fileWriter = tf - - tw := tar.NewWriter(fileWriter) - defer tw.Close() - - // magicString first so we can figure out who originally wrote the tar. - file := &tar.Header{Name: magicString, Size: int64(len(magicString))} - if err := tw.WriteHeader(file); err != nil { - return err - } - if _, err := tw.Write([]byte(magicString)); err != nil { - return err - } - // .ssh/key.pub => authorized_keys - file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700} - if err := tw.WriteHeader(file); err != nil { - return err - } - pubKey, err := ioutil.ReadFile(d.publicSSHKeyPath()) - if err != nil { - return err - } - file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644} - if err := tw.WriteHeader(file); err != nil { - return err - } - if _, err := tw.Write([]byte(pubKey)); err != nil { - return err - } - file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644} - if err := tw.WriteHeader(file); err != nil { - return err - } - if _, err := tw.Write([]byte(pubKey)); err != nil { - return err - } - return tw.Close() -} - -// execute command over SSH with user / password authentication -func executeSSHCommand(command string, d *Driver) error { - log.Debugf("Execute executeSSHCommand: %s", command) - - config := &cryptossh.ClientConfig{ - User: d.SSHUser, - Auth: []cryptossh.AuthMethod{ - cryptossh.Password(d.SSHPassword), - }, - } - - client, err := cryptossh.Dial("tcp", fmt.Sprintf("%s:%d", d.IPAddress, d.SSHPort), config) - if err != nil { - log.Debugf("Failed to dial:", err) - return err - } - - session, err := client.NewSession() - if err != nil { - log.Debugf("Failed to create session: " + err.Error()) - return err - } - defer session.Close() - - var b bytes.Buffer - session.Stdout = &b - - if err := session.Run(command); err != nil { - log.Debugf("Failed to run: " + err.Error()) - return err - } - log.Debugf("Stdout from executeSSHCommand: %s", b.String()) - - return nil -} diff --git a/vendor/github.com/docker/machine/drivers/vmwarefusion/vmrun_darwin.go b/vendor/github.com/docker/machine/drivers/vmwarefusion/vmrun_darwin.go deleted file mode 100644 index 3039ea177c96..000000000000 --- a/vendor/github.com/docker/machine/drivers/vmwarefusion/vmrun_darwin.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2014 VMware, Inc. 
All rights reserved. Licensed under the Apache v2 License. - */ - -package vmwarefusion - -import ( - "bytes" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/machine/libmachine/log" -) - -var ( - vmrunbin = setVmwareCmd("vmrun") - vdiskmanbin = setVmwareCmd("vmware-vdiskmanager") -) - -var ( - ErrMachineExist = errors.New("machine already exists") - ErrMachineNotExist = errors.New("machine does not exist") - ErrVMRUNNotFound = errors.New("VMRUN not found") -) - -// detect the vmrun and vmware-vdiskmanager cmds' path if needed -func setVmwareCmd(cmd string) string { - if path, err := exec.LookPath(cmd); err == nil { - return path - } - return filepath.Join("/Applications/VMware Fusion.app/Contents/Library/", cmd) -} - -func vmrun(args ...string) (string, string, error) { - // vmrun with nogui on VMware Fusion through at least 8.0.1 doesn't work right - // if the umask is set to not allow world-readable permissions - _ = syscall.Umask(022) - cmd := exec.Command(vmrunbin, args...) - if os.Getenv("MACHINE_DEBUG") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout, cmd.Stderr = &stdout, &stderr - log.Debugf("executing: %v %v", vmrunbin, strings.Join(args, " ")) - - err := cmd.Run() - if err != nil { - if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound { - err = ErrVMRUNNotFound - } - } - - return stdout.String(), stderr.String(), err -} - -// Make a vmdk disk image with the given size (in MB). -func vdiskmanager(dest string, size int) error { - cmd := exec.Command(vdiskmanbin, "-c", "-t", "0", "-s", fmt.Sprintf("%dMB", size), "-a", "lsilogic", dest) - if os.Getenv("MACHINE_DEBUG") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - - if stdout := cmd.Run(); stdout != nil { - if ee, ok := stdout.(*exec.Error); ok && ee == exec.ErrNotFound { - return ErrVMRUNNotFound - } - } - return nil -} diff --git a/vendor/github.com/docker/machine/drivers/vmwarefusion/vmx_darwin.go b/vendor/github.com/docker/machine/drivers/vmwarefusion/vmx_darwin.go deleted file mode 100644 index 0ca6e63b28dd..000000000000 --- a/vendor/github.com/docker/machine/drivers/vmwarefusion/vmx_darwin.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
- */ - -package vmwarefusion - -const vmx = ` -.encoding = "UTF-8" -config.version = "8" -displayName = "{{.MachineName}}" -ethernet0.present = "TRUE" -ethernet0.connectionType = "nat" -ethernet0.virtualDev = "vmxnet3" -ethernet0.wakeOnPcktRcv = "FALSE" -ethernet0.addressType = "generated" -ethernet0.linkStatePropagation.enable = "TRUE" -pciBridge0.present = "TRUE" -pciBridge4.present = "TRUE" -pciBridge4.virtualDev = "pcieRootPort" -pciBridge4.functions = "8" -pciBridge5.present = "TRUE" -pciBridge5.virtualDev = "pcieRootPort" -pciBridge5.functions = "8" -pciBridge6.present = "TRUE" -pciBridge6.virtualDev = "pcieRootPort" -pciBridge6.functions = "8" -pciBridge7.present = "TRUE" -pciBridge7.virtualDev = "pcieRootPort" -pciBridge7.functions = "8" -pciBridge0.pciSlotNumber = "17" -pciBridge4.pciSlotNumber = "21" -pciBridge5.pciSlotNumber = "22" -pciBridge6.pciSlotNumber = "23" -pciBridge7.pciSlotNumber = "24" -scsi0.pciSlotNumber = "160" -usb.pciSlotNumber = "32" -ethernet0.pciSlotNumber = "192" -sound.pciSlotNumber = "33" -vmci0.pciSlotNumber = "35" -sata0.pciSlotNumber = "36" -floppy0.present = "FALSE" -guestOS = "other3xlinux-64" -hpet0.present = "TRUE" -sata0.present = "TRUE" -sata0:1.present = "TRUE" -sata0:1.fileName = "{{.ISO}}" -sata0:1.deviceType = "cdrom-image" -{{ if .ConfigDriveURL }} -sata0:2.present = "TRUE" -sata0:2.fileName = "{{.ConfigDriveISO}}" -sata0:2.deviceType = "cdrom-image" -{{ end }} -vmci0.present = "TRUE" -mem.hotadd = "TRUE" -memsize = "{{.Memory}}" -powerType.powerOff = "soft" -powerType.powerOn = "soft" -powerType.reset = "soft" -powerType.suspend = "soft" -scsi0.present = "TRUE" -scsi0.virtualDev = "pvscsi" -scsi0:0.fileName = "{{.MachineName}}.vmdk" -scsi0:0.present = "TRUE" -tools.synctime = "TRUE" -virtualHW.productCompatibility = "hosted" -virtualHW.version = "10" -msg.autoanswer = "TRUE" -uuid.action = "create" -numvcpus = "{{.CPU}}" -hgfs.mapRootShare = "FALSE" -hgfs.linkRootShare = "FALSE" -` diff --git a/vendor/github.com/docker/machine/libmachine/auth/auth.go b/vendor/github.com/docker/machine/libmachine/auth/auth.go deleted file mode 100644 index 86ae79d2326e..000000000000 --- a/vendor/github.com/docker/machine/libmachine/auth/auth.go +++ /dev/null @@ -1,18 +0,0 @@ -package auth - -type Options struct { - CertDir string - CaCertPath string - CaPrivateKeyPath string - CaCertRemotePath string - ServerCertPath string - ServerKeyPath string - ClientKeyPath string - ServerCertRemotePath string - ServerKeyRemotePath string - ClientCertPath string - ServerCertSANs []string - // StorePath is left in for historical reasons, but not really meant to - // be used directly. 
- StorePath string -} diff --git a/vendor/github.com/docker/machine/libmachine/cert/bootstrap.go b/vendor/github.com/docker/machine/libmachine/cert/bootstrap.go deleted file mode 100644 index 59543bcb0502..000000000000 --- a/vendor/github.com/docker/machine/libmachine/cert/bootstrap.go +++ /dev/null @@ -1,136 +0,0 @@ -package cert - -import ( - "errors" - "fmt" - "os" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" -) - -func createCACert(authOptions *auth.Options, caOrg string, bits int) error { - caCertPath := authOptions.CaCertPath - caPrivateKeyPath := authOptions.CaPrivateKeyPath - - log.Infof("Creating CA: %s", caCertPath) - - // check if the key path exists; if so, error - if _, err := os.Stat(caPrivateKeyPath); err == nil { - return errors.New("certificate authority key already exists") - } - - if err := GenerateCACertificate(caCertPath, caPrivateKeyPath, caOrg, bits); err != nil { - return fmt.Errorf("generating CA certificate failed: %s", err) - } - - return nil -} - -func createCert(authOptions *auth.Options, org string, bits int) error { - certDir := authOptions.CertDir - caCertPath := authOptions.CaCertPath - caPrivateKeyPath := authOptions.CaPrivateKeyPath - clientCertPath := authOptions.ClientCertPath - clientKeyPath := authOptions.ClientKeyPath - - log.Infof("Creating client certificate: %s", clientCertPath) - - if _, err := os.Stat(certDir); err != nil { - if os.IsNotExist(err) { - if err := os.Mkdir(certDir, 0700); err != nil { - return fmt.Errorf("failure creating machine client cert dir: %s", err) - } - } else { - return err - } - } - - // check if the key path exists; if so, error - if _, err := os.Stat(clientKeyPath); err == nil { - return errors.New("client key already exists") - } - - // Used to generate the client certificate. - certOptions := &Options{ - Hosts: []string{""}, - CertFile: clientCertPath, - KeyFile: clientKeyPath, - CAFile: caCertPath, - CAKeyFile: caPrivateKeyPath, - Org: org, - Bits: bits, - SwarmMaster: false, - } - - if err := GenerateCert(certOptions); err != nil { - return fmt.Errorf("failure generating client certificate: %s", err) - } - - return nil -} - -func BootstrapCertificates(authOptions *auth.Options) error { - certDir := authOptions.CertDir - caCertPath := authOptions.CaCertPath - clientCertPath := authOptions.ClientCertPath - clientKeyPath := authOptions.ClientKeyPath - caPrivateKeyPath := authOptions.CaPrivateKeyPath - - // TODO: I'm not super happy about this use of "org", the user should - // have to specify it explicitly instead of implicitly basing it on - // $USER. - caOrg := mcnutils.GetUsername() - org := caOrg + "." 
- - bits := 2048 - - if _, err := os.Stat(certDir); err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(certDir, 0700); err != nil { - return fmt.Errorf("creating machine certificate dir failed: %s", err) - } - } else { - return err - } - } - - if _, err := os.Stat(caCertPath); os.IsNotExist(err) { - if err := createCACert(authOptions, caOrg, bits); err != nil { - return err - } - } else { - current, err := CheckCertificateDate(caCertPath) - if err != nil { - return err - } - if !current { - log.Info("CA certificate is outdated and needs to be regenerated") - os.Remove(caPrivateKeyPath) - if err := createCACert(authOptions, caOrg, bits); err != nil { - return err - } - } - } - - if _, err := os.Stat(clientCertPath); os.IsNotExist(err) { - if err := createCert(authOptions, org, bits); err != nil { - return err - } - } else { - current, err := CheckCertificateDate(clientCertPath) - if err != nil { - return err - } - if !current { - log.Info("Client certificate is outdated and needs to be regenerated") - os.Remove(clientKeyPath) - if err := createCert(authOptions, org, bits); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/cert/cert.go b/vendor/github.com/docker/machine/libmachine/cert/cert.go deleted file mode 100644 index a0d9c732ed44..000000000000 --- a/vendor/github.com/docker/machine/libmachine/cert/cert.go +++ /dev/null @@ -1,294 +0,0 @@ -package cert - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "io/ioutil" - "math/big" - "net" - "os" - "time" - - "errors" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/log" -) - -var defaultGenerator = NewX509CertGenerator() - -type Options struct { - Hosts []string - CertFile, KeyFile, CAFile, CAKeyFile, Org string - Bits int - SwarmMaster bool -} - -type Generator interface { - GenerateCACertificate(certFile, keyFile, org string, bits int) error - GenerateCert(opts *Options) error - ReadTLSConfig(addr string, authOptions *auth.Options) (*tls.Config, error) - ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) -} - -type X509CertGenerator struct{} - -func NewX509CertGenerator() Generator { - return &X509CertGenerator{} -} - -func GenerateCACertificate(certFile, keyFile, org string, bits int) error { - return defaultGenerator.GenerateCACertificate(certFile, keyFile, org, bits) -} - -func GenerateCert(opts *Options) error { - return defaultGenerator.GenerateCert(opts) -} - -func ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) { - return defaultGenerator.ValidateCertificate(addr, authOptions) -} - -func ReadTLSConfig(addr string, authOptions *auth.Options) (*tls.Config, error) { - return defaultGenerator.ReadTLSConfig(addr, authOptions) -} - -func SetCertGenerator(cg Generator) { - defaultGenerator = cg -} - -func (xcg *X509CertGenerator) getTLSConfig(caCert, cert, key []byte, allowInsecure bool) (*tls.Config, error) { - // TLS config - var tlsConfig tls.Config - tlsConfig.InsecureSkipVerify = allowInsecure - certPool := x509.NewCertPool() - - ok := certPool.AppendCertsFromPEM(caCert) - if !ok { - return &tlsConfig, errors.New("There was an error reading certificate") - } - - tlsConfig.RootCAs = certPool - keypair, err := tls.X509KeyPair(cert, key) - if err != nil { - return &tlsConfig, err - } - tlsConfig.Certificates = []tls.Certificate{keypair} - - return &tlsConfig, nil -} - -func (xcg *X509CertGenerator) 
newCertificate(org string) (*x509.Certificate, error) { - now := time.Now() - // need to set notBefore slightly in the past to account for time - // skew in the VMs otherwise the certs sometimes are not yet valid - notBefore := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-5, 0, 0, time.Local) - notAfter := notBefore.Add(time.Hour * 24 * 1080) - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{org}, - }, - NotBefore: notBefore, - NotAfter: notAfter, - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement, - BasicConstraintsValid: true, - }, nil - -} - -// GenerateCACertificate generates a new certificate authority from the specified org -// and bit size and stores the resulting certificate and key file -// in the arguments. -func (xcg *X509CertGenerator) GenerateCACertificate(certFile, keyFile, org string, bits int) error { - template, err := xcg.newCertificate(org) - if err != nil { - return err - } - - template.IsCA = true - template.KeyUsage |= x509.KeyUsageCertSign - template.KeyUsage |= x509.KeyUsageKeyEncipherment - template.KeyUsage |= x509.KeyUsageKeyAgreement - - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return err - } - - derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv) - if err != nil { - return err - } - - certOut, err := os.Create(certFile) - if err != nil { - return err - } - - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - - } - - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - - return nil -} - -// GenerateCert generates a new certificate signed using the provided -// certificate authority files and stores the result in the certificate -// file and key provided. The provided host names are set to the -// appropriate certificate fields. -func (xcg *X509CertGenerator) GenerateCert(opts *Options) error { - template, err := xcg.newCertificate(opts.Org) - if err != nil { - return err - } - // client - if len(opts.Hosts) == 1 && opts.Hosts[0] == "" { - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - template.KeyUsage = x509.KeyUsageDigitalSignature - } else { // server - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} - if opts.SwarmMaster { - // Extend the Swarm master's server certificate - // permissions to also be able to connect to downstream - // nodes as a client. 
- template.ExtKeyUsage = append(template.ExtKeyUsage, x509.ExtKeyUsageClientAuth) - } - for _, h := range opts.Hosts { - if ip := net.ParseIP(h); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, h) - } - } - } - - tlsCert, err := tls.LoadX509KeyPair(opts.CAFile, opts.CAKeyFile) - if err != nil { - return err - } - - priv, err := rsa.GenerateKey(rand.Reader, opts.Bits) - if err != nil { - return err - } - - x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0]) - if err != nil { - return err - } - - derBytes, err := x509.CreateCertificate(rand.Reader, template, x509Cert, &priv.PublicKey, tlsCert.PrivateKey) - if err != nil { - return err - } - - certOut, err := os.Create(opts.CertFile) - if err != nil { - return err - } - - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - keyOut, err := os.OpenFile(opts.KeyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - - return nil -} - -// ReadTLSConfig reads the tls config for a machine. -func (xcg *X509CertGenerator) ReadTLSConfig(addr string, authOptions *auth.Options) (*tls.Config, error) { - caCertPath := authOptions.CaCertPath - clientCertPath := authOptions.ClientCertPath - clientKeyPath := authOptions.ClientKeyPath - - log.Debugf("Reading CA certificate from %s", caCertPath) - caCert, err := ioutil.ReadFile(caCertPath) - if err != nil { - return nil, err - } - - log.Debugf("Reading client certificate from %s", clientCertPath) - clientCert, err := ioutil.ReadFile(clientCertPath) - if err != nil { - return nil, err - } - - log.Debugf("Reading client key from %s", clientKeyPath) - clientKey, err := ioutil.ReadFile(clientKeyPath) - if err != nil { - return nil, err - } - - return xcg.getTLSConfig(caCert, clientCert, clientKey, false) -} - -// ValidateCertificate validate the certificate installed on the vm. 
-func (xcg *X509CertGenerator) ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) { - tlsConfig, err := xcg.ReadTLSConfig(addr, authOptions) - if err != nil { - return false, err - } - - dialer := &net.Dialer{ - Timeout: time.Second * 20, - } - - _, err = tls.DialWithDialer(dialer, "tcp", addr, tlsConfig) - if err != nil { - return false, err - } - - return true, nil -} - -func CheckCertificateDate(certPath string) (bool, error) { - log.Debugf("Reading certificate data from %s", certPath) - certBytes, err := ioutil.ReadFile(certPath) - if err != nil { - return false, err - } - - log.Debug("Decoding PEM data...") - pemBlock, _ := pem.Decode(certBytes) - if pemBlock == nil { - return false, errors.New("Failed to decode PEM data") - } - - log.Debug("Parsing certificate...") - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return false, err - } - if time.Now().After(cert.NotAfter) { - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/check/check.go b/vendor/github.com/docker/machine/libmachine/check/check.go deleted file mode 100644 index 49fbce28201a..000000000000 --- a/vendor/github.com/docker/machine/libmachine/check/check.go +++ /dev/null @@ -1,118 +0,0 @@ -package check - -import ( - "errors" - "fmt" - "net/url" - "strings" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/cert" - "github.com/docker/machine/libmachine/host" -) - -var ( - DefaultConnChecker ConnChecker - ErrSwarmNotStarted = errors.New("Connection to Swarm cannot be checked but the certs are valid. Maybe swarm is not started") -) - -func init() { - DefaultConnChecker = &MachineConnChecker{} -} - -// ErrCertInvalid for when the cert is computed to be invalid. -type ErrCertInvalid struct { - wrappedErr error - hostURL string -} - -func (e ErrCertInvalid) Error() string { - return fmt.Sprintf(`There was an error validating certificates for host %q: %s -You can attempt to regenerate them using 'docker-machine regenerate-certs [name]'. -Be advised that this will trigger a Docker daemon restart which might stop running containers. -`, e.hostURL, e.wrappedErr) -} - -type ConnChecker interface { - Check(*host.Host, bool) (dockerHost string, authOptions *auth.Options, err error) -} - -type MachineConnChecker struct{} - -func (mcc *MachineConnChecker) Check(h *host.Host, swarm bool) (string, *auth.Options, error) { - dockerHost, err := h.Driver.GetURL() - if err != nil { - return "", &auth.Options{}, err - } - - dockerURL := dockerHost - if swarm { - dockerURL, err = parseSwarm(dockerHost, h) - if err != nil { - return "", &auth.Options{}, err - } - } - - u, err := url.Parse(dockerURL) - if err != nil { - return "", &auth.Options{}, fmt.Errorf("Error parsing URL: %s", err) - } - - authOptions := h.AuthOptions() - - if err := checkCert(u.Host, authOptions); err != nil { - if swarm { - // Connection to the swarm port cannot be checked. Maybe it's just the swarm containers that are down - // TODO: check the containers and restart them - // Let's check the non-swarm connection to give a better error message to the user. 
- if _, _, err := mcc.Check(h, false); err == nil { - return "", &auth.Options{}, ErrSwarmNotStarted - } - } - - return "", &auth.Options{}, fmt.Errorf("Error checking and/or regenerating the certs: %s", err) - } - - return dockerURL, authOptions, nil -} - -func checkCert(hostURL string, authOptions *auth.Options) error { - valid, err := cert.ValidateCertificate(hostURL, authOptions) - if !valid || err != nil { - return ErrCertInvalid{ - wrappedErr: err, - hostURL: hostURL, - } - } - - return nil -} - -// TODO: This could use a unit test. -func parseSwarm(hostURL string, h *host.Host) (string, error) { - swarmOptions := h.HostOptions.SwarmOptions - - if !swarmOptions.Master { - return "", fmt.Errorf("%q is not a swarm master. The --swarm flag is intended for use with swarm masters", h.Name) - } - - u, err := url.Parse(swarmOptions.Host) - if err != nil { - return "", fmt.Errorf("There was an error parsing the url: %s", err) - } - parts := strings.Split(u.Host, ":") - swarmPort := parts[1] - - // get IP of machine to replace in case swarm host is 0.0.0.0 - mURL, err := url.Parse(hostURL) - if err != nil { - return "", fmt.Errorf("There was an error parsing the url: %s", err) - } - - mParts := strings.Split(mURL.Host, ":") - machineIP := mParts[0] - - hostURL = fmt.Sprintf("tcp://%s:%s", machineIP, swarmPort) - - return hostURL, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/base.go b/vendor/github.com/docker/machine/libmachine/drivers/base.go deleted file mode 100644 index 4ed066432c2a..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/base.go +++ /dev/null @@ -1,94 +0,0 @@ -package drivers - -import ( - "errors" - "path/filepath" -) - -const ( - DefaultSSHUser = "root" - DefaultSSHPort = 22 - DefaultEngineInstallURL = "https://get.docker.com" -) - -// BaseDriver - Embed this struct into drivers to provide the common set -// of fields and functions. 
-type BaseDriver struct { - IPAddress string - MachineName string - SSHUser string - SSHPort int - SSHKeyPath string - StorePath string - SwarmMaster bool - SwarmHost string - SwarmDiscovery string -} - -// DriverName returns the name of the driver -func (d *BaseDriver) DriverName() string { - return "unknown" -} - -// GetMachineName returns the machine name -func (d *BaseDriver) GetMachineName() string { - return d.MachineName -} - -// GetIP returns the ip -func (d *BaseDriver) GetIP() (string, error) { - if d.IPAddress == "" { - return "", errors.New("IP address is not set") - } - return d.IPAddress, nil -} - -// GetSSHKeyPath returns the ssh key path -func (d *BaseDriver) GetSSHKeyPath() string { - if d.SSHKeyPath == "" { - d.SSHKeyPath = d.ResolveStorePath("id_rsa") - } - return d.SSHKeyPath -} - -// GetSSHPort returns the ssh port, 22 if not specified -func (d *BaseDriver) GetSSHPort() (int, error) { - if d.SSHPort == 0 { - d.SSHPort = DefaultSSHPort - } - - return d.SSHPort, nil -} - -// GetSSHUsername returns the ssh user name, root if not specified -func (d *BaseDriver) GetSSHUsername() string { - if d.SSHUser == "" { - d.SSHUser = DefaultSSHUser - } - return d.SSHUser -} - -// PreCreateCheck is called to enforce pre-creation steps -func (d *BaseDriver) PreCreateCheck() error { - return nil -} - -// ResolveStorePath returns the store path where the machine is -func (d *BaseDriver) ResolveStorePath(file string) string { - return filepath.Join(d.StorePath, "machines", d.MachineName, file) -} - -// SetSwarmConfigFromFlags configures the driver for swarm -func (d *BaseDriver) SetSwarmConfigFromFlags(flags DriverOptions) { - d.SwarmMaster = flags.Bool("swarm-master") - d.SwarmHost = flags.String("swarm-host") - d.SwarmDiscovery = flags.String("swarm-discovery") -} - -func EngineInstallURLFlagSet(flags DriverOptions) bool { - return EngineInstallURLSet(flags.String("engine-install-url")) -} - -func EngineInstallURLSet(url string) bool { - return url != DefaultEngineInstallURL && url != "" -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/check.go b/vendor/github.com/docker/machine/libmachine/drivers/check.go deleted file mode 100644 index 677247186d99..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/check.go +++ /dev/null @@ -1,84 +0,0 @@ -package drivers - -import "github.com/docker/machine/libmachine/mcnflag" - -// CheckDriverOptions implements DriverOptions and is used to validate flag parsing -type CheckDriverOptions struct { - FlagsValues map[string]interface{} - CreateFlags []mcnflag.Flag - InvalidFlags []string -} - -func (o *CheckDriverOptions) String(key string) string { - for _, flag := range o.CreateFlags { - if flag.String() == key { - f, ok := flag.(mcnflag.StringFlag) - if !ok { - o.InvalidFlags = append(o.InvalidFlags, flag.String()) - } - - value, present := o.FlagsValues[key].(string) - if present { - return value - } - return f.Value - } - } - - return "" -} - -func (o *CheckDriverOptions) StringSlice(key string) []string { - for _, flag := range o.CreateFlags { - if flag.String() == key { - f, ok := flag.(mcnflag.StringSliceFlag) - if !ok { - o.InvalidFlags = append(o.InvalidFlags, flag.String()) - } - - value, present := o.FlagsValues[key].([]string) - if present { - return value - } - return f.Value - } - } - - return nil -} - -func (o *CheckDriverOptions) Int(key string) int { - for _, flag := range o.CreateFlags { - if flag.String() == key { - f, ok := flag.(mcnflag.IntFlag) - if !ok { - o.InvalidFlags = 
append(o.InvalidFlags, flag.String()) - } - - value, present := o.FlagsValues[key].(int) - if present { - return value - } - return f.Value - } - } - - return 0 -} - -func (o *CheckDriverOptions) Bool(key string) bool { - for _, flag := range o.CreateFlags { - if flag.String() == key { - _, ok := flag.(mcnflag.BoolFlag) - if !ok { - o.InvalidFlags = append(o.InvalidFlags, flag.String()) - } - } - } - - value, present := o.FlagsValues[key].(bool) - if present { - return value - } - return false -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/drivers.go b/vendor/github.com/docker/machine/libmachine/drivers/drivers.go deleted file mode 100644 index dbc37d0e1276..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/drivers.go +++ /dev/null @@ -1,109 +0,0 @@ -package drivers - -import ( - "errors" - - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" -) - -// Driver defines how a host is created and controlled. Different types of -// driver represent different ways hosts can be created (e.g. different -// hypervisors, different cloud providers) -type Driver interface { - // Create a host using the driver's config - Create() error - - // DriverName returns the name of the driver - DriverName() string - - // GetCreateFlags returns the mcnflag.Flag slice representing the flags - // that can be set, their descriptions and defaults. - GetCreateFlags() []mcnflag.Flag - - // GetIP returns an IP or hostname that this host is available at - // e.g. 1.2.3.4 or docker-host-d60b70a14d3a.cloudapp.net - GetIP() (string, error) - - // GetMachineName returns the name of the machine - GetMachineName() string - - // GetSSHHostname returns hostname for use with ssh - GetSSHHostname() (string, error) - - // GetSSHKeyPath returns key path for use with ssh - GetSSHKeyPath() string - - // GetSSHPort returns port for use with ssh - GetSSHPort() (int, error) - - // GetSSHUsername returns username for use with ssh - GetSSHUsername() string - - // GetURL returns a Docker compatible host URL for connecting to this host - // e.g. tcp://1.2.3.4:2376 - GetURL() (string, error) - - // GetState returns the state that the host is in (running, stopped, etc) - GetState() (state.State, error) - - // Kill stops a host forcefully - Kill() error - - // PreCreateCheck allows for pre-create operations to make sure a driver is ready for creation - PreCreateCheck() error - - // Remove a host - Remove() error - - // Restart a host. This may just call Stop(); Start() if the provider does not - // have any special restart behaviour. - Restart() error - - // SetConfigFromFlags configures the driver with the object that was returned - // by RegisterCreateFlags - SetConfigFromFlags(opts DriverOptions) error - - // Start a host - Start() error - - // Stop a host gracefully - Stop() error -} - -var ErrHostIsNotRunning = errors.New("Host is not running") - -type DriverOptions interface { - String(key string) string - StringSlice(key string) []string - Int(key string) int - Bool(key string) bool -} - -func MachineInState(d Driver, desiredState state.State) func() bool { - return func() bool { - currentState, err := d.GetState() - if err != nil { - log.Debugf("Error getting machine state: %s", err) - } - if currentState == desiredState { - return true - } - return false - } -} - -// MustBeRunning will return an error if the machine is not in a running state. 
-func MustBeRunning(d Driver) error { - s, err := d.GetState() - if err != nil { - return err - } - - if s != state.Running { - return ErrHostIsNotRunning - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/notsupported.go b/vendor/github.com/docker/machine/libmachine/drivers/notsupported.go deleted file mode 100644 index 59a1d4e53895..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/notsupported.go +++ /dev/null @@ -1,89 +0,0 @@ -package drivers - -import ( - "fmt" - - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" -) - -type DriverNotSupported struct { - *BaseDriver - Name string -} - -type NotSupported struct { - DriverName string -} - -func (e NotSupported) Error() string { - return fmt.Sprintf("Driver %q not supported on this platform.", e.DriverName) -} - -// NewDriverNotSupported creates a placeholder Driver that replaces -// a driver that is not supported on a given platform. eg fusion on linux. -func NewDriverNotSupported(driverName, hostName, storePath string) Driver { - return &DriverNotSupported{ - BaseDriver: &BaseDriver{ - MachineName: hostName, - StorePath: storePath, - }, - Name: driverName, - } -} - -func (d *DriverNotSupported) DriverName() string { - return d.Name -} - -func (d *DriverNotSupported) PreCreateCheck() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) GetCreateFlags() []mcnflag.Flag { - return nil -} - -func (d *DriverNotSupported) SetConfigFromFlags(flags DriverOptions) error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) GetURL() (string, error) { - return "", NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) GetSSHHostname() (string, error) { - return "", NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) GetState() (state.State, error) { - return state.Error, NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Create() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Remove() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Start() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Stop() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Restart() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Kill() error { - return NotSupported{d.DriverName()} -} - -func (d *DriverNotSupported) Upgrade() error { - return NotSupported{d.DriverName()} -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/plugin/localbinary/plugin.go b/vendor/github.com/docker/machine/libmachine/drivers/plugin/localbinary/plugin.go deleted file mode 100644 index c431c7eda9d7..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/plugin/localbinary/plugin.go +++ /dev/null @@ -1,250 +0,0 @@ -package localbinary - -import ( - "bufio" - "fmt" - "io" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine/log" -) - -var ( - // Timeout where we will bail if we're not able to properly contact the - // plugin server. 
- defaultTimeout = 10 * time.Second - CurrentBinaryIsDockerMachine = false - CoreDrivers = []string{"amazonec2", "azure", "digitalocean", - "exoscale", "generic", "google", "hyperv", "none", "openstack", - "rackspace", "softlayer", "virtualbox", "vmwarefusion", - "vmwarevcloudair", "vmwarevsphere"} -) - -const ( - pluginOut = "(%s) %s" - pluginErr = "(%s) DBG | %s" - PluginEnvKey = "MACHINE_PLUGIN_TOKEN" - PluginEnvVal = "42" - PluginEnvDriverName = "MACHINE_PLUGIN_DRIVER_NAME" -) - -type PluginStreamer interface { - // Return a channel for receiving the output of the stream line by - // line. - // - // It happens to be the case that we do this all inside of the main - // plugin struct today, but that may not be the case forever. - AttachStream(*bufio.Scanner) <-chan string -} - -type PluginServer interface { - // Get the address where the plugin server is listening. - Address() (string, error) - - // Serve kicks off the plugin server. - Serve() error - - // Close shuts down the initialized server. - Close() error -} - -type McnBinaryExecutor interface { - // Execute the driver plugin. Returns scanners for plugin binary - // stdout and stderr. - Start() (*bufio.Scanner, *bufio.Scanner, error) - - // Stop reading from the plugins in question. - Close() error -} - -// DriverPlugin interface wraps the underlying mechanics of starting a driver -// plugin server and then figuring out where it can be dialed. -type DriverPlugin interface { - PluginServer - PluginStreamer -} - -type Plugin struct { - Executor McnBinaryExecutor - Addr string - MachineName string - addrCh chan string - stopCh chan struct{} - timeout time.Duration -} - -type Executor struct { - pluginStdout, pluginStderr io.ReadCloser - DriverName string - cmd *exec.Cmd - binaryPath string -} - -type ErrPluginBinaryNotFound struct { - driverName string - driverPath string -} - -func (e ErrPluginBinaryNotFound) Error() string { - return fmt.Sprintf("Driver %q not found. Do you have the plugin binary %q accessible in your PATH?", e.driverName, e.driverPath) -} - -// driverPath finds the path of a driver binary by its name. -// + If the driver is a core driver, there is no separate driver binary. We reuse current binary if it's `docker-machine` -// or we assume `docker-machine` is in the PATH. 
-// + If the driver is NOT a core driver, then the separate binary must be in the PATH and it's name must be -// `docker-machine-driver-driverName` -func driverPath(driverName string) string { - for _, coreDriver := range CoreDrivers { - if coreDriver == driverName { - if CurrentBinaryIsDockerMachine { - return os.Args[0] - } - - return "docker-machine" - } - } - - return fmt.Sprintf("docker-machine-driver-%s", driverName) -} - -func NewPlugin(driverName string) (*Plugin, error) { - driverPath := driverPath(driverName) - binaryPath, err := exec.LookPath(driverPath) - if err != nil { - return nil, ErrPluginBinaryNotFound{driverName, driverPath} - } - - log.Debugf("Found binary path at %s", binaryPath) - - return &Plugin{ - stopCh: make(chan struct{}), - addrCh: make(chan string, 1), - Executor: &Executor{ - DriverName: driverName, - binaryPath: binaryPath, - }, - }, nil -} - -func (lbe *Executor) Start() (*bufio.Scanner, *bufio.Scanner, error) { - var err error - - log.Debugf("Launching plugin server for driver %s", lbe.DriverName) - - lbe.cmd = exec.Command(lbe.binaryPath) - - lbe.pluginStdout, err = lbe.cmd.StdoutPipe() - if err != nil { - return nil, nil, fmt.Errorf("Error getting cmd stdout pipe: %s", err) - } - - lbe.pluginStderr, err = lbe.cmd.StderrPipe() - if err != nil { - return nil, nil, fmt.Errorf("Error getting cmd stderr pipe: %s", err) - } - - outScanner := bufio.NewScanner(lbe.pluginStdout) - errScanner := bufio.NewScanner(lbe.pluginStderr) - - os.Setenv(PluginEnvKey, PluginEnvVal) - os.Setenv(PluginEnvDriverName, lbe.DriverName) - - if err := lbe.cmd.Start(); err != nil { - return nil, nil, fmt.Errorf("Error starting plugin binary: %s", err) - } - - return outScanner, errScanner, nil -} - -func (lbe *Executor) Close() error { - if err := lbe.cmd.Wait(); err != nil { - return fmt.Errorf("Error waiting for binary close: %s", err) - } - - return nil -} - -func stream(scanner *bufio.Scanner, streamOutCh chan<- string, stopCh <-chan struct{}) { - for scanner.Scan() { - line := scanner.Text() - if err := scanner.Err(); err != nil { - log.Warnf("Scanning stream: %s", err) - } - select { - case streamOutCh <- strings.Trim(line, "\n"): - case <-stopCh: - return - } - } -} - -func (lbp *Plugin) AttachStream(scanner *bufio.Scanner) <-chan string { - streamOutCh := make(chan string) - go stream(scanner, streamOutCh, lbp.stopCh) - return streamOutCh -} - -func (lbp *Plugin) execServer() error { - outScanner, errScanner, err := lbp.Executor.Start() - if err != nil { - return err - } - - // Scan just one line to get the address, then send it to the relevant - // channel. 
- outScanner.Scan() - addr := outScanner.Text() - if err := outScanner.Err(); err != nil { - return fmt.Errorf("Reading plugin address failed: %s", err) - } - - lbp.addrCh <- strings.TrimSpace(addr) - - stdOutCh := lbp.AttachStream(outScanner) - stdErrCh := lbp.AttachStream(errScanner) - - for { - select { - case out := <-stdOutCh: - log.Infof(pluginOut, lbp.MachineName, out) - case err := <-stdErrCh: - log.Debugf(pluginErr, lbp.MachineName, err) - case <-lbp.stopCh: - if err := lbp.Executor.Close(); err != nil { - return fmt.Errorf("Error closing local plugin binary: %s", err) - } - return nil - } - } -} - -func (lbp *Plugin) Serve() error { - return lbp.execServer() -} - -func (lbp *Plugin) Address() (string, error) { - if lbp.Addr == "" { - if lbp.timeout == 0 { - lbp.timeout = defaultTimeout - } - - select { - case lbp.Addr = <-lbp.addrCh: - log.Debugf("Plugin server listening at address %s", lbp.Addr) - close(lbp.addrCh) - return lbp.Addr, nil - case <-time.After(lbp.timeout): - return "", fmt.Errorf("Failed to dial the plugin server in %s", lbp.timeout) - } - } - return lbp.Addr, nil -} - -func (lbp *Plugin) Close() error { - close(lbp.stopCh) - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/plugin/register_driver.go b/vendor/github.com/docker/machine/libmachine/drivers/plugin/register_driver.go deleted file mode 100644 index 27e0dbc33d4f..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/plugin/register_driver.go +++ /dev/null @@ -1,63 +0,0 @@ -package plugin - -import ( - "fmt" - "net" - "net/http" - "net/rpc" - "os" - "time" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin/localbinary" - "github.com/docker/machine/libmachine/drivers/rpc" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/version" -) - -var ( - heartbeatTimeout = 10 * time.Second -) - -func RegisterDriver(d drivers.Driver) { - if os.Getenv(localbinary.PluginEnvKey) != localbinary.PluginEnvVal { - fmt.Fprintf(os.Stderr, `This is a Docker Machine plugin binary. -Plugin binaries are not intended to be invoked directly. -Please use this plugin through the main 'docker-machine' binary. 
-(API version: %d) -`, version.APIVersion) - os.Exit(1) - } - - log.SetDebug(true) - os.Setenv("MACHINE_DEBUG", "1") - - rpcd := rpcdriver.NewRPCServerDriver(d) - rpc.RegisterName(rpcdriver.RPCServiceNameV0, rpcd) - rpc.RegisterName(rpcdriver.RPCServiceNameV1, rpcd) - rpc.HandleHTTP() - - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - fmt.Fprintf(os.Stderr, "Error loading RPC server: %s\n", err) - os.Exit(1) - } - defer listener.Close() - - fmt.Println(listener.Addr()) - - go http.Serve(listener, nil) - - for { - select { - case <-rpcd.CloseCh: - log.Debug("Closing plugin on server side") - os.Exit(0) - case <-rpcd.HeartbeatCh: - continue - case <-time.After(heartbeatTimeout): - // TODO: Add heartbeat retry logic - os.Exit(1) - } - } -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/rpc/client_driver.go b/vendor/github.com/docker/machine/libmachine/drivers/rpc/client_driver.go deleted file mode 100644 index 5ce22cdcd68d..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/rpc/client_driver.go +++ /dev/null @@ -1,367 +0,0 @@ -package rpcdriver - -import ( - "fmt" - "net/rpc" - "sync" - "time" - - "io" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin/localbinary" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/version" -) - -var ( - heartbeatInterval = 5 * time.Second -) - -type RPCClientDriverFactory interface { - NewRPCClientDriver(driverName string, rawDriver []byte) (*RPCClientDriver, error) - io.Closer -} - -type DefaultRPCClientDriverFactory struct { - openedDrivers []*RPCClientDriver - openedDriversLock sync.Locker -} - -func NewRPCClientDriverFactory() RPCClientDriverFactory { - return &DefaultRPCClientDriverFactory{ - openedDrivers: []*RPCClientDriver{}, - openedDriversLock: &sync.Mutex{}, - } -} - -type RPCClientDriver struct { - plugin localbinary.DriverPlugin - heartbeatDoneCh chan bool - Client *InternalClient -} - -type RPCCall struct { - ServiceMethod string - Args interface{} - Reply interface{} -} - -type InternalClient struct { - MachineName string - RPCClient *rpc.Client - rpcServiceName string -} - -const ( - RPCServiceNameV0 = `RpcServerDriver` - RPCServiceNameV1 = `RPCServerDriver` - - HeartbeatMethod = `.Heartbeat` - GetVersionMethod = `.GetVersion` - CloseMethod = `.Close` - GetCreateFlagsMethod = `.GetCreateFlags` - SetConfigRawMethod = `.SetConfigRaw` - GetConfigRawMethod = `.GetConfigRaw` - DriverNameMethod = `.DriverName` - SetConfigFromFlagsMethod = `.SetConfigFromFlags` - GetURLMethod = `.GetURL` - GetMachineNameMethod = `.GetMachineName` - GetIPMethod = `.GetIP` - GetSSHHostnameMethod = `.GetSSHHostname` - GetSSHKeyPathMethod = `.GetSSHKeyPath` - GetSSHPortMethod = `.GetSSHPort` - GetSSHUsernameMethod = `.GetSSHUsername` - GetStateMethod = `.GetState` - PreCreateCheckMethod = `.PreCreateCheck` - CreateMethod = `.Create` - RemoveMethod = `.Remove` - StartMethod = `.Start` - StopMethod = `.Stop` - RestartMethod = `.Restart` - KillMethod = `.Kill` - UpgradeMethod = `.Upgrade` -) - -func (ic *InternalClient) Call(serviceMethod string, args interface{}, reply interface{}) error { - if serviceMethod != HeartbeatMethod { - log.Debugf("(%s) Calling %+v", ic.MachineName, serviceMethod) - } - return ic.RPCClient.Call(ic.rpcServiceName+serviceMethod, args, reply) -} - -func (ic *InternalClient) switchToV0() { - 
ic.rpcServiceName = RPCServiceNameV0 -} - -func NewInternalClient(rpcclient *rpc.Client) *InternalClient { - return &InternalClient{ - RPCClient: rpcclient, - rpcServiceName: RPCServiceNameV1, - } -} - -func (f *DefaultRPCClientDriverFactory) Close() error { - f.openedDriversLock.Lock() - defer f.openedDriversLock.Unlock() - - for _, openedDriver := range f.openedDrivers { - if err := openedDriver.close(); err != nil { - // No need to display an error. - // There's nothing we can do and it doesn't add value to the user. - } - } - f.openedDrivers = []*RPCClientDriver{} - - return nil -} - -func (f *DefaultRPCClientDriverFactory) NewRPCClientDriver(driverName string, rawDriver []byte) (*RPCClientDriver, error) { - mcnName := "" - - p, err := localbinary.NewPlugin(driverName) - if err != nil { - return nil, err - } - - go func() { - if err := p.Serve(); err != nil { - // TODO: Is this best approach? - log.Warn(err) - return - } - }() - - addr, err := p.Address() - if err != nil { - return nil, fmt.Errorf("Error attempting to get plugin server address for RPC: %s", err) - } - - rpcclient, err := rpc.DialHTTP("tcp", addr) - if err != nil { - return nil, err - } - - c := &RPCClientDriver{ - Client: NewInternalClient(rpcclient), - heartbeatDoneCh: make(chan bool), - } - - f.openedDriversLock.Lock() - f.openedDrivers = append(f.openedDrivers, c) - f.openedDriversLock.Unlock() - - var serverVersion int - if err := c.Client.Call(GetVersionMethod, struct{}{}, &serverVersion); err != nil { - // this is the first call we make to the server. We try to play nice with old pre 0.5.1 client, - // by gracefully trying old RPCServiceName, we do this only once, and keep the result for future calls. - log.Debugf(err.Error()) - log.Debugf("Client (%s) with %s does not work, re-attempting with %s", c.Client.MachineName, RPCServiceNameV1, RPCServiceNameV0) - c.Client.switchToV0() - if err := c.Client.Call(GetVersionMethod, struct{}{}, &serverVersion); err != nil { - return nil, err - } - } - - if serverVersion != version.APIVersion { - return nil, fmt.Errorf("Driver binary uses an incompatible API version (%d)", serverVersion) - } - log.Debug("Using API Version ", serverVersion) - - go func(c *RPCClientDriver) { - for { - select { - case <-c.heartbeatDoneCh: - return - case <-time.After(heartbeatInterval): - if err := c.Client.Call(HeartbeatMethod, struct{}{}, nil); err != nil { - log.Warnf("Wrapper Docker Machine process exiting due to closed plugin server (%s)", err) - if err := c.close(); err != nil { - log.Warn(err) - } - } - } - } - }(c) - - if err := c.SetConfigRaw(rawDriver); err != nil { - return nil, err - } - - mcnName = c.GetMachineName() - p.MachineName = mcnName - c.Client.MachineName = mcnName - c.plugin = p - - return c, nil -} - -func (c *RPCClientDriver) MarshalJSON() ([]byte, error) { - return c.GetConfigRaw() -} - -func (c *RPCClientDriver) UnmarshalJSON(data []byte) error { - return c.SetConfigRaw(data) -} - -func (c *RPCClientDriver) close() error { - c.heartbeatDoneCh <- true - close(c.heartbeatDoneCh) - - log.Debug("Making call to close driver server") - - if err := c.Client.Call(CloseMethod, struct{}{}, nil); err != nil { - log.Debugf("Failed to make call to close driver server: %s", err) - } else { - log.Debug("Successfully made call to close driver server") - } - - log.Debug("Making call to close connection to plugin binary") - - return c.plugin.Close() -} - -// Helper method to make requests which take no arguments and return simply a -// string, e.g. "GetIP". 
-func (c *RPCClientDriver) rpcStringCall(method string) (string, error) { - var info string - - if err := c.Client.Call(method, struct{}{}, &info); err != nil { - return "", err - } - - return info, nil -} - -func (c *RPCClientDriver) GetCreateFlags() []mcnflag.Flag { - var flags []mcnflag.Flag - - if err := c.Client.Call(GetCreateFlagsMethod, struct{}{}, &flags); err != nil { - log.Warnf("Error attempting call to get create flags: %s", err) - } - - return flags -} - -func (c *RPCClientDriver) SetConfigRaw(data []byte) error { - return c.Client.Call(SetConfigRawMethod, data, nil) -} - -func (c *RPCClientDriver) GetConfigRaw() ([]byte, error) { - var data []byte - - if err := c.Client.Call(GetConfigRawMethod, struct{}{}, &data); err != nil { - return nil, err - } - - return data, nil -} - -// DriverName returns the name of the driver -func (c *RPCClientDriver) DriverName() string { - driverName, err := c.rpcStringCall(DriverNameMethod) - if err != nil { - log.Warnf("Error attempting call to get driver name: %s", err) - } - - return driverName -} - -func (c *RPCClientDriver) SetConfigFromFlags(flags drivers.DriverOptions) error { - return c.Client.Call(SetConfigFromFlagsMethod, &flags, nil) -} - -func (c *RPCClientDriver) GetURL() (string, error) { - return c.rpcStringCall(GetURLMethod) -} - -func (c *RPCClientDriver) GetMachineName() string { - name, err := c.rpcStringCall(GetMachineNameMethod) - if err != nil { - log.Warnf("Error attempting call to get machine name: %s", err) - } - - return name -} - -func (c *RPCClientDriver) GetIP() (string, error) { - return c.rpcStringCall(GetIPMethod) -} - -func (c *RPCClientDriver) GetSSHHostname() (string, error) { - return c.rpcStringCall(GetSSHHostnameMethod) -} - -// GetSSHKeyPath returns the key path -// TODO: This method doesn't even make sense to have with RPC. 
-func (c *RPCClientDriver) GetSSHKeyPath() string { - path, err := c.rpcStringCall(GetSSHKeyPathMethod) - if err != nil { - log.Warnf("Error attempting call to get SSH key path: %s", err) - } - - return path -} - -func (c *RPCClientDriver) GetSSHPort() (int, error) { - var port int - - if err := c.Client.Call(GetSSHPortMethod, struct{}{}, &port); err != nil { - return 0, err - } - - return port, nil -} - -func (c *RPCClientDriver) GetSSHUsername() string { - username, err := c.rpcStringCall(GetSSHUsernameMethod) - if err != nil { - log.Warnf("Error attempting call to get SSH username: %s", err) - } - - return username -} - -func (c *RPCClientDriver) GetState() (state.State, error) { - var s state.State - - if err := c.Client.Call(GetStateMethod, struct{}{}, &s); err != nil { - return state.Error, err - } - - return s, nil -} - -func (c *RPCClientDriver) PreCreateCheck() error { - return c.Client.Call(PreCreateCheckMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Create() error { - return c.Client.Call(CreateMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Remove() error { - return c.Client.Call(RemoveMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Start() error { - return c.Client.Call(StartMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Stop() error { - return c.Client.Call(StopMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Restart() error { - return c.Client.Call(RestartMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Kill() error { - return c.Client.Call(KillMethod, struct{}{}, nil) -} - -func (c *RPCClientDriver) Upgrade() error { - return c.Client.Call(UpgradeMethod, struct{}{}, nil) -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/rpc/server_driver.go b/vendor/github.com/docker/machine/libmachine/drivers/rpc/server_driver.go deleted file mode 100644 index 6fb3f3d5d863..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/rpc/server_driver.go +++ /dev/null @@ -1,227 +0,0 @@ -package rpcdriver - -import ( - "encoding/gob" - "encoding/json" - "fmt" - "runtime/debug" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/version" -) - -type Stacker interface { - Stack() []byte -} - -type StandardStack struct{} - -func (ss *StandardStack) Stack() []byte { - return debug.Stack() -} - -var ( - stdStacker Stacker = &StandardStack{} -) - -func init() { - gob.Register(new(RPCFlags)) - gob.Register(new(mcnflag.IntFlag)) - gob.Register(new(mcnflag.StringFlag)) - gob.Register(new(mcnflag.StringSliceFlag)) - gob.Register(new(mcnflag.BoolFlag)) -} - -type RPCFlags struct { - Values map[string]interface{} -} - -func (r RPCFlags) Get(key string) interface{} { - val, ok := r.Values[key] - if !ok { - log.Warnf("Trying to access option %s which does not exist", key) - log.Warn("THIS ***WILL*** CAUSE UNEXPECTED BEHAVIOR") - } - return val -} - -func (r RPCFlags) String(key string) string { - val, ok := r.Get(key).(string) - if !ok { - log.Warnf("Type assertion did not go smoothly to string for key %s", key) - } - return val -} - -func (r RPCFlags) StringSlice(key string) []string { - val, ok := r.Get(key).([]string) - if !ok { - log.Warnf("Type assertion did not go smoothly to string slice for key %s", key) - } - return val -} - -func (r RPCFlags) Int(key string) int { - val, ok := r.Get(key).(int) - if !ok { - log.Warnf("Type assertion did 
not go smoothly to int for key %s", key) - } - return val -} - -func (r RPCFlags) Bool(key string) bool { - val, ok := r.Get(key).(bool) - if !ok { - log.Warnf("Type assertion did not go smoothly to bool for key %s", key) - } - return val -} - -type RPCServerDriver struct { - ActualDriver drivers.Driver - CloseCh chan bool - HeartbeatCh chan bool -} - -func NewRPCServerDriver(d drivers.Driver) *RPCServerDriver { - return &RPCServerDriver{ - ActualDriver: d, - CloseCh: make(chan bool), - HeartbeatCh: make(chan bool), - } -} - -func (r *RPCServerDriver) Close(_, _ *struct{}) error { - r.CloseCh <- true - return nil -} - -func (r *RPCServerDriver) GetVersion(_ *struct{}, reply *int) error { - *reply = version.APIVersion - return nil -} - -func (r *RPCServerDriver) GetConfigRaw(_ *struct{}, reply *[]byte) error { - driverData, err := json.Marshal(r.ActualDriver) - if err != nil { - return err - } - - *reply = driverData - - return nil -} - -func (r *RPCServerDriver) GetCreateFlags(_ *struct{}, reply *[]mcnflag.Flag) error { - *reply = r.ActualDriver.GetCreateFlags() - return nil -} - -func (r *RPCServerDriver) SetConfigRaw(data []byte, _ *struct{}) error { - return json.Unmarshal(data, &r.ActualDriver) -} - -func trapPanic(err *error) { - if r := recover(); r != nil { - *err = fmt.Errorf("Panic in the driver: %s\n%s", r.(error), stdStacker.Stack()) - } -} - -func (r *RPCServerDriver) Create(_, _ *struct{}) (err error) { - // In an ideal world, plugins wouldn't ever panic. However, panics - // have been known to happen and cause issues. Therefore, we recover - // and do not crash the RPC server completely in the case of a panic - // during create. - defer trapPanic(&err) - - err = r.ActualDriver.Create() - - return err -} - -func (r *RPCServerDriver) DriverName(_ *struct{}, reply *string) error { - *reply = r.ActualDriver.DriverName() - return nil -} - -func (r *RPCServerDriver) GetIP(_ *struct{}, reply *string) error { - ip, err := r.ActualDriver.GetIP() - *reply = ip - return err -} - -func (r *RPCServerDriver) GetMachineName(_ *struct{}, reply *string) error { - *reply = r.ActualDriver.GetMachineName() - return nil -} - -func (r *RPCServerDriver) GetSSHHostname(_ *struct{}, reply *string) error { - hostname, err := r.ActualDriver.GetSSHHostname() - *reply = hostname - return err -} - -func (r *RPCServerDriver) GetSSHKeyPath(_ *struct{}, reply *string) error { - *reply = r.ActualDriver.GetSSHKeyPath() - return nil -} - -// GetSSHPort returns port for use with ssh -func (r *RPCServerDriver) GetSSHPort(_ *struct{}, reply *int) error { - port, err := r.ActualDriver.GetSSHPort() - *reply = port - return err -} - -func (r *RPCServerDriver) GetSSHUsername(_ *struct{}, reply *string) error { - *reply = r.ActualDriver.GetSSHUsername() - return nil -} - -func (r *RPCServerDriver) GetURL(_ *struct{}, reply *string) error { - info, err := r.ActualDriver.GetURL() - *reply = info - return err -} - -func (r *RPCServerDriver) GetState(_ *struct{}, reply *state.State) error { - s, err := r.ActualDriver.GetState() - *reply = s - return err -} - -func (r *RPCServerDriver) Kill(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.Kill() -} - -func (r *RPCServerDriver) PreCreateCheck(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.PreCreateCheck() -} - -func (r *RPCServerDriver) Remove(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.Remove() -} - -func (r *RPCServerDriver) Restart(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.Restart() -} - -func (r *RPCServerDriver) 
SetConfigFromFlags(flags *drivers.DriverOptions, _ *struct{}) error { - return r.ActualDriver.SetConfigFromFlags(*flags) -} - -func (r *RPCServerDriver) Start(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.Start() -} - -func (r *RPCServerDriver) Stop(_ *struct{}, _ *struct{}) error { - return r.ActualDriver.Stop() -} - -func (r *RPCServerDriver) Heartbeat(_ *struct{}, _ *struct{}) error { - r.HeartbeatCh <- true - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/serial.go b/vendor/github.com/docker/machine/libmachine/drivers/serial.go deleted file mode 100644 index 056704eefd3b..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/serial.go +++ /dev/null @@ -1,172 +0,0 @@ -package drivers - -import ( - "sync" - - "encoding/json" - - "github.com/docker/machine/libmachine/mcnflag" - "github.com/docker/machine/libmachine/state" -) - -var stdLock = &sync.Mutex{} - -// SerialDriver is a wrapper struct which is used to ensure that RPC calls -// to a driver only occur one at a time. -// Some providers, e.g. virtualbox, should not run driver operations at the -// same time as other driver instances of the same type. Otherwise, we scrape -// up against VirtualBox's own locking mechanisms. -// -// It would be preferable to simply have a lock around, say, the VBoxManage -// command, but with our current one-server-process-per-machine model it is -// impossible to dictate this locking on the server side. -type SerialDriver struct { - Driver - sync.Locker -} - -func NewSerialDriver(innerDriver Driver) Driver { - return newSerialDriverWithLock(innerDriver, stdLock) -} - -func newSerialDriverWithLock(innerDriver Driver, lock sync.Locker) Driver { - return &SerialDriver{ - Driver: innerDriver, - Locker: lock, - } -} - -// Create a host using the driver's config -func (d *SerialDriver) Create() error { - d.Lock() - defer d.Unlock() - return d.Driver.Create() -} - -// DriverName returns the name of the driver as it is registered -func (d *SerialDriver) DriverName() string { - d.Lock() - defer d.Unlock() - return d.Driver.DriverName() -} - -// GetCreateFlags returns the mcnflag.Flag slice representing the flags -// that can be set, their descriptions and defaults. -func (d *SerialDriver) GetCreateFlags() []mcnflag.Flag { - d.Lock() - defer d.Unlock() - return d.Driver.GetCreateFlags() -} - -// GetIP returns an IP or hostname that this host is available at -// e.g. 1.2.3.4 or docker-host-d60b70a14d3a.cloudapp.net -func (d *SerialDriver) GetIP() (string, error) { - d.Lock() - defer d.Unlock() - return d.Driver.GetIP() -} - -// GetMachineName returns the name of the machine -func (d *SerialDriver) GetMachineName() string { - d.Lock() - defer d.Unlock() - return d.Driver.GetMachineName() -} - -// GetSSHHostname returns hostname for use with ssh -func (d *SerialDriver) GetSSHHostname() (string, error) { - d.Lock() - defer d.Unlock() - return d.Driver.GetSSHHostname() -} - -// GetSSHKeyPath returns key path for use with ssh -func (d *SerialDriver) GetSSHKeyPath() string { - d.Lock() - defer d.Unlock() - return d.Driver.GetSSHKeyPath() -} - -// GetSSHPort returns port for use with ssh -func (d *SerialDriver) GetSSHPort() (int, error) { - d.Lock() - defer d.Unlock() - return d.Driver.GetSSHPort() -} - -// GetSSHUsername returns username for use with ssh -func (d *SerialDriver) GetSSHUsername() string { - d.Lock() - defer d.Unlock() - return d.Driver.GetSSHUsername() -} - -// GetURL returns a Docker compatible host URL for connecting to this host -// e.g. 
tcp://1.2.3.4:2376 -func (d *SerialDriver) GetURL() (string, error) { - d.Lock() - defer d.Unlock() - return d.Driver.GetURL() -} - -// GetState returns the state that the host is in (running, stopped, etc) -func (d *SerialDriver) GetState() (state.State, error) { - d.Lock() - defer d.Unlock() - return d.Driver.GetState() -} - -// Kill stops a host forcefully -func (d *SerialDriver) Kill() error { - d.Lock() - defer d.Unlock() - return d.Driver.Kill() -} - -// PreCreateCheck allows for pre-create operations to make sure a driver is ready for creation -func (d *SerialDriver) PreCreateCheck() error { - d.Lock() - defer d.Unlock() - return d.Driver.PreCreateCheck() -} - -// Remove a host -func (d *SerialDriver) Remove() error { - d.Lock() - defer d.Unlock() - return d.Driver.Remove() -} - -// Restart a host. This may just call Stop(); Start() if the provider does not -// have any special restart behaviour. -func (d *SerialDriver) Restart() error { - d.Lock() - defer d.Unlock() - return d.Driver.Restart() -} - -// SetConfigFromFlags configures the driver with the object that was returned -// by RegisterCreateFlags -func (d *SerialDriver) SetConfigFromFlags(opts DriverOptions) error { - d.Lock() - defer d.Unlock() - return d.Driver.SetConfigFromFlags(opts) -} - -// Start a host -func (d *SerialDriver) Start() error { - d.Lock() - defer d.Unlock() - return d.Driver.Start() -} - -// Stop a host gracefully -func (d *SerialDriver) Stop() error { - d.Lock() - defer d.Unlock() - return d.Driver.Stop() -} - -func (d *SerialDriver) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Driver) -} diff --git a/vendor/github.com/docker/machine/libmachine/drivers/utils.go b/vendor/github.com/docker/machine/libmachine/drivers/utils.go deleted file mode 100644 index b023f168996d..000000000000 --- a/vendor/github.com/docker/machine/libmachine/drivers/utils.go +++ /dev/null @@ -1,73 +0,0 @@ -package drivers - -import ( - "fmt" - - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/ssh" -) - -func GetSSHClientFromDriver(d Driver) (ssh.Client, error) { - address, err := d.GetSSHHostname() - if err != nil { - return nil, err - } - - port, err := d.GetSSHPort() - if err != nil { - return nil, err - } - - var auth *ssh.Auth - if d.GetSSHKeyPath() == "" { - auth = &ssh.Auth{} - } else { - auth = &ssh.Auth{ - Keys: []string{d.GetSSHKeyPath()}, - } - } - - client, err := ssh.NewClient(d.GetSSHUsername(), address, port, auth) - return client, err - -} - -func RunSSHCommandFromDriver(d Driver, command string) (string, error) { - client, err := GetSSHClientFromDriver(d) - if err != nil { - return "", err - } - - log.Debugf("About to run SSH command:\n%s", command) - - output, err := client.Output(command) - log.Debugf("SSH cmd err, output: %v: %s", err, output) - if err != nil { - return "", fmt.Errorf(`ssh command error: -command : %s -err : %v -output : %s`, command, err, output) - } - - return output, nil -} - -func sshAvailableFunc(d Driver) func() bool { - return func() bool { - log.Debug("Getting to WaitForSSH function...") - if _, err := RunSSHCommandFromDriver(d, "exit 0"); err != nil { - log.Debugf("Error getting ssh command 'exit 0' : %s", err) - return false - } - return true - } -} - -func WaitForSSH(d Driver) error { - // Try to dial SSH for 30 seconds before timing out. - if err := mcnutils.WaitFor(sshAvailableFunc(d)); err != nil { - return fmt.Errorf("Too many retries waiting for SSH to be available. 
Last error: %s", err) - } - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/engine/engine.go b/vendor/github.com/docker/machine/libmachine/engine/engine.go deleted file mode 100644 index 2910432c1947..000000000000 --- a/vendor/github.com/docker/machine/libmachine/engine/engine.go +++ /dev/null @@ -1,21 +0,0 @@ -package engine - -const ( - DefaultPort = 2376 -) - -type Options struct { - ArbitraryFlags []string - DNS []string `json:"Dns"` - GraphDir string - Env []string - Ipv6 bool - InsecureRegistry []string - Labels []string - LogLevel string - StorageDriver string - SelinuxEnabled bool - TLSVerify bool `json:"TlsVerify"` - RegistryMirror []string - InstallURL string -} diff --git a/vendor/github.com/docker/machine/libmachine/host/host.go b/vendor/github.com/docker/machine/libmachine/host/host.go deleted file mode 100644 index d36e187ebabc..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/host.go +++ /dev/null @@ -1,286 +0,0 @@ -package host - -import ( - "regexp" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/cert" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcndockerclient" - "github.com/docker/machine/libmachine/mcnerror" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/swarm" - "github.com/docker/machine/libmachine/versioncmp" -) - -var ( - validHostNamePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9\-\.]*$`) - stdSSHClientCreator SSHClientCreator = &StandardSSHClientCreator{} -) - -type SSHClientCreator interface { - CreateSSHClient(d drivers.Driver) (ssh.Client, error) -} - -type StandardSSHClientCreator struct { - drivers.Driver -} - -func SetSSHClientCreator(creator SSHClientCreator) { - stdSSHClientCreator = creator -} - -type Host struct { - ConfigVersion int - Driver drivers.Driver - DriverName string - HostOptions *Options - Name string - RawDriver []byte `json:"-"` -} - -type Options struct { - Driver string - Memory int - Disk int - EngineOptions *engine.Options - SwarmOptions *swarm.Options - AuthOptions *auth.Options -} - -type Metadata struct { - ConfigVersion int - DriverName string - HostOptions Options -} - -func ValidateHostName(name string) bool { - return validHostNamePattern.MatchString(name) -} - -func (h *Host) RunSSHCommand(command string) (string, error) { - return drivers.RunSSHCommandFromDriver(h.Driver, command) -} - -func (h *Host) CreateSSHClient() (ssh.Client, error) { - return stdSSHClientCreator.CreateSSHClient(h.Driver) -} - -func (creator *StandardSSHClientCreator) CreateSSHClient(d drivers.Driver) (ssh.Client, error) { - addr, err := d.GetSSHHostname() - if err != nil { - return &ssh.ExternalClient{}, err - } - - port, err := d.GetSSHPort() - if err != nil { - return &ssh.ExternalClient{}, err - } - - auth := &ssh.Auth{} - if d.GetSSHKeyPath() != "" { - auth.Keys = []string{d.GetSSHKeyPath()} - } - - return ssh.NewClient(d.GetSSHUsername(), addr, port, auth) -} - -func (h *Host) runActionForState(action func() error, desiredState state.State) error { - if drivers.MachineInState(h.Driver, desiredState)() { - return 
mcnerror.ErrHostAlreadyInState{ - Name: h.Name, - State: desiredState, - } - } - - if err := action(); err != nil { - return err - } - - return mcnutils.WaitFor(drivers.MachineInState(h.Driver, desiredState)) -} - -func (h *Host) WaitForDocker() error { - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return err - } - - return provision.WaitForDocker(provisioner, engine.DefaultPort) -} - -func (h *Host) Start() error { - log.Infof("Starting %q...", h.Name) - if err := h.runActionForState(h.Driver.Start, state.Running); err != nil { - return err - } - - log.Infof("Machine %q was started.", h.Name) - - return h.WaitForDocker() -} - -func (h *Host) Stop() error { - log.Infof("Stopping %q...", h.Name) - if err := h.runActionForState(h.Driver.Stop, state.Stopped); err != nil { - return err - } - - log.Infof("Machine %q was stopped.", h.Name) - return nil -} - -func (h *Host) Kill() error { - log.Infof("Killing %q...", h.Name) - if err := h.runActionForState(h.Driver.Kill, state.Stopped); err != nil { - return err - } - - log.Infof("Machine %q was killed.", h.Name) - return nil -} - -func (h *Host) Restart() error { - log.Infof("Restarting %q...", h.Name) - if drivers.MachineInState(h.Driver, state.Stopped)() { - if err := h.Start(); err != nil { - return err - } - } else if drivers.MachineInState(h.Driver, state.Running)() { - if err := h.Driver.Restart(); err != nil { - return err - } - if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil { - return err - } - } - - return h.WaitForDocker() -} - -func (h *Host) DockerVersion() (string, error) { - url, err := h.Driver.GetURL() - if err != nil { - return "", err - } - - dockerHost := &mcndockerclient.RemoteDocker{ - HostURL: url, - AuthOption: h.AuthOptions(), - } - dockerVersion, err := mcndockerclient.DockerVersion(dockerHost) - if err != nil { - return "", err - } - - return dockerVersion, nil -} - -func (h *Host) Upgrade() error { - machineState, err := h.Driver.GetState() - if err != nil { - return err - } - - if machineState != state.Running { - log.Info("Starting machine so machine can be upgraded...") - if err := h.Start(); err != nil { - return err - } - } - - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return err - } - - dockerVersion, err := h.DockerVersion() - if err != nil { - return err - } - - // If we're upgrading from a pre-CE (e.g., 1.13.1) release to a CE - // release (e.g., 17.03.0-ce), we should simply uninstall and - // re-install from scratch, since the official package names will - // change from 'docker-engine' to 'docker-ce'. - if versioncmp.LessThanOrEqualTo(dockerVersion, provision.LastReleaseBeforeCEVersioning) && - // RancherOS and boot2docker, being 'static ISO builds', have - // an upgrade process which simply grabs the latest if it's - // different, and so do not need to jump through this hoop to - // upgrade safely. - provisioner.String() != "rancheros" && - provisioner.String() != "boot2docker" { - - // Name of package 'docker-engine' will fall through in this - // case, so that we execute, e.g., - // - // 'sudo apt-get purge -y docker-engine' - if err := provisioner.Package("docker-engine", pkgaction.Purge); err != nil { - return err - } - - // Then we kick off the normal provisioning process which will - // go off and install Docker (get.docker.com script should work - // fine to install Docker from scratch after removing the old - // packages, and images/containers etc. 
should be preserved in - // /var/lib/docker) - return h.Provision() - } - - log.Info("Upgrading docker...") - if err := provisioner.Package("docker", pkgaction.Upgrade); err != nil { - return err - } - - log.Info("Restarting docker...") - return provisioner.Service("docker", serviceaction.Restart) -} - -func (h *Host) URL() (string, error) { - return h.Driver.GetURL() -} - -func (h *Host) AuthOptions() *auth.Options { - if h.HostOptions == nil { - return nil - } - return h.HostOptions.AuthOptions -} - -func (h *Host) ConfigureAuth() error { - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return err - } - - // TODO: This is kind of a hack (or is it? I'm not really sure until - // we have more clearly defined outlook on what the responsibilities - // and modularity of the provisioners should be). - // - // Call provision to re-provision the certs properly. - return provisioner.Provision(swarm.Options{}, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) -} - -func (h *Host) ConfigureAllAuth() error { - log.Info("Regenerating local certificates") - if err := cert.BootstrapCertificates(h.AuthOptions()); err != nil { - return err - } - return h.ConfigureAuth() -} - -func (h *Host) Provision() error { - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return err - } - - return provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) -} diff --git a/vendor/github.com/docker/machine/libmachine/host/host_v0.go b/vendor/github.com/docker/machine/libmachine/host/host_v0.go deleted file mode 100644 index a36db5fea006..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/host_v0.go +++ /dev/null @@ -1,35 +0,0 @@ -package host - -import "github.com/docker/machine/libmachine/drivers" - -type V0 struct { - Name string `json:"-"` - Driver drivers.Driver - DriverName string - ConfigVersion int - HostOptions *Options - - StorePath string - CaCertPath string - PrivateKeyPath string - ServerCertPath string - ServerKeyPath string - ClientCertPath string - SwarmHost string - SwarmMaster bool - SwarmDiscovery string - ClientKeyPath string -} - -type MetadataV0 struct { - HostOptions Options - DriverName string - - ConfigVersion int - StorePath string - CaCertPath string - PrivateKeyPath string - ServerCertPath string - ServerKeyPath string - ClientCertPath string -} diff --git a/vendor/github.com/docker/machine/libmachine/host/host_v2.go b/vendor/github.com/docker/machine/libmachine/host/host_v2.go deleted file mode 100644 index 8157e5f914aa..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/host_v2.go +++ /dev/null @@ -1,11 +0,0 @@ -package host - -import "github.com/docker/machine/libmachine/drivers" - -type V2 struct { - ConfigVersion int - Driver drivers.Driver - DriverName string - HostOptions *Options - Name string -} diff --git a/vendor/github.com/docker/machine/libmachine/host/migrate.go b/vendor/github.com/docker/machine/libmachine/host/migrate.go deleted file mode 100644 index 7134240de63e..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/migrate.go +++ /dev/null @@ -1,120 +0,0 @@ -package host - -import ( - "encoding/json" - "errors" - "fmt" - "path/filepath" - - "github.com/docker/machine/drivers/none" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/version" -) - -var ( - errConfigFromFuture = errors.New("config version is from the future -- you should upgrade your Docker Machine client") -) - 
-type RawDataDriver struct { - *none.Driver - Data []byte // passed directly back when invoking json.Marshal on this type -} - -func (r *RawDataDriver) MarshalJSON() ([]byte, error) { - return r.Data, nil -} - -func (r *RawDataDriver) UnmarshalJSON(data []byte) error { - r.Data = data - return nil -} - -func getMigratedHostMetadata(data []byte) (*Metadata, error) { - // HostMetadata is for a "first pass" so we can then load the driver - var ( - hostMetadata *MetadataV0 - ) - - if err := json.Unmarshal(data, &hostMetadata); err != nil { - return &Metadata{}, err - } - - migratedHostMetadata := MigrateHostMetadataV0ToHostMetadataV1(hostMetadata) - - return migratedHostMetadata, nil -} - -func MigrateHost(h *Host, data []byte) (*Host, bool, error) { - var ( - migrationNeeded = false - migrationPerformed = false - hostV1 *V1 - hostV2 *V2 - ) - - migratedHostMetadata, err := getMigratedHostMetadata(data) - if err != nil { - return nil, false, err - } - - globalStorePath := filepath.Dir(filepath.Dir(migratedHostMetadata.HostOptions.AuthOptions.StorePath)) - - driver := &RawDataDriver{none.NewDriver(h.Name, globalStorePath), nil} - - if migratedHostMetadata.ConfigVersion > version.ConfigVersion { - return nil, false, errConfigFromFuture - } - - if migratedHostMetadata.ConfigVersion == version.ConfigVersion { - h.Driver = driver - if err := json.Unmarshal(data, &h); err != nil { - return nil, migrationPerformed, fmt.Errorf("Error unmarshalling most recent host version: %s", err) - } - } else { - migrationNeeded = true - } - - if migrationNeeded { - migrationPerformed = true - for h.ConfigVersion = migratedHostMetadata.ConfigVersion; h.ConfigVersion < version.ConfigVersion; h.ConfigVersion++ { - log.Debugf("Migrating to config v%d", h.ConfigVersion) - switch h.ConfigVersion { - case 0: - hostV0 := &V0{ - Driver: driver, - } - if err := json.Unmarshal(data, &hostV0); err != nil { - return nil, migrationPerformed, fmt.Errorf("Error unmarshalling host config version 0: %s", err) - } - hostV1 = MigrateHostV0ToHostV1(hostV0) - case 1: - if hostV1 == nil { - hostV1 = &V1{ - Driver: driver, - } - if err := json.Unmarshal(data, &hostV1); err != nil { - return nil, migrationPerformed, fmt.Errorf("Error unmarshalling host config version 1: %s", err) - } - } - hostV2 = MigrateHostV1ToHostV2(hostV1) - case 2: - if hostV2 == nil { - hostV2 = &V2{ - Driver: driver, - } - if err := json.Unmarshal(data, &hostV2); err != nil { - return nil, migrationPerformed, fmt.Errorf("Error unmarshalling host config version 2: %s", err) - } - } - h = MigrateHostV2ToHostV3(hostV2, data, globalStorePath) - driver.Data = h.RawDriver - h.Driver = driver - case 3: - } - } - } - - h.RawDriver = driver.Data - - return h, migrationPerformed, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/host/migrate_v0_v1.go b/vendor/github.com/docker/machine/libmachine/host/migrate_v0_v1.go deleted file mode 100644 index 6460d38e6562..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/migrate_v0_v1.go +++ /dev/null @@ -1,73 +0,0 @@ -package host - -import ( - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/swarm" -) - -// In the 0.1.0 => 0.2.0 transition, the JSON representation of -// machines changed from a "flat" to a more "nested" structure -// for various options and configuration settings. To preserve -// compatibility with existing machines, these migration functions -// have been introduced. 
They preserve backwards compat at the expense -// of some duplicated information. - -// MigrateHostV0ToHostV1 validates host config and modifies if needed -// this is used for configuration updates -func MigrateHostV0ToHostV1(hostV0 *V0) *V1 { - hostV1 := &V1{ - Driver: hostV0.Driver, - DriverName: hostV0.DriverName, - } - - hostV1.HostOptions = &OptionsV1{} - hostV1.HostOptions.EngineOptions = &engine.Options{ - TLSVerify: true, - InstallURL: "https://get.docker.com", - } - hostV1.HostOptions.SwarmOptions = &swarm.Options{ - Address: "", - Discovery: hostV0.SwarmDiscovery, - Host: hostV0.SwarmHost, - Master: hostV0.SwarmMaster, - } - hostV1.HostOptions.AuthOptions = &AuthOptionsV1{ - StorePath: hostV0.StorePath, - CaCertPath: hostV0.CaCertPath, - CaCertRemotePath: "", - ServerCertPath: hostV0.ServerCertPath, - ServerKeyPath: hostV0.ServerKeyPath, - ClientKeyPath: hostV0.ClientKeyPath, - ServerCertRemotePath: "", - ServerKeyRemotePath: "", - PrivateKeyPath: hostV0.PrivateKeyPath, - ClientCertPath: hostV0.ClientCertPath, - } - - return hostV1 -} - -// MigrateHostMetadataV0ToHostMetadataV1 fills nested host metadata and modifies if needed -// this is used for configuration updates -func MigrateHostMetadataV0ToHostMetadataV1(m *MetadataV0) *Metadata { - hostMetadata := &Metadata{} - hostMetadata.DriverName = m.DriverName - hostMetadata.HostOptions.EngineOptions = &engine.Options{} - hostMetadata.HostOptions.AuthOptions = &auth.Options{ - StorePath: m.StorePath, - CaCertPath: m.CaCertPath, - CaCertRemotePath: "", - ServerCertPath: m.ServerCertPath, - ServerKeyPath: m.ServerKeyPath, - ClientKeyPath: "", - ServerCertRemotePath: "", - ServerKeyRemotePath: "", - CaPrivateKeyPath: m.PrivateKeyPath, - ClientCertPath: m.ClientCertPath, - } - - hostMetadata.ConfigVersion = m.ConfigVersion - - return hostMetadata -} diff --git a/vendor/github.com/docker/machine/libmachine/host/migrate_v1_v2.go b/vendor/github.com/docker/machine/libmachine/host/migrate_v1_v2.go deleted file mode 100644 index 6931993d134c..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/migrate_v1_v2.go +++ /dev/null @@ -1,78 +0,0 @@ -package host - -import ( - "path/filepath" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/swarm" -) - -type AuthOptionsV1 struct { - StorePath string - CaCertPath string - CaCertRemotePath string - ServerCertPath string - ServerKeyPath string - ClientKeyPath string - ServerCertRemotePath string - ServerKeyRemotePath string - PrivateKeyPath string - ClientCertPath string -} - -type OptionsV1 struct { - Driver string - Memory int - Disk int - EngineOptions *engine.Options - SwarmOptions *swarm.Options - AuthOptions *AuthOptionsV1 -} - -type V1 struct { - ConfigVersion int - Driver drivers.Driver - DriverName string - HostOptions *OptionsV1 - Name string `json:"-"` - StorePath string -} - -func MigrateHostV1ToHostV2(hostV1 *V1) *V2 { - // Changed: Put StorePath directly in AuthOptions (for provisioning), - // and AuthOptions.PrivateKeyPath => AuthOptions.CaPrivateKeyPath - // Also, CertDir has been added. 
- - globalStorePath := filepath.Dir(filepath.Dir(hostV1.StorePath)) - - h := &V2{ - ConfigVersion: hostV1.ConfigVersion, - Driver: hostV1.Driver, - Name: hostV1.Driver.GetMachineName(), - DriverName: hostV1.DriverName, - HostOptions: &Options{ - Driver: hostV1.HostOptions.Driver, - Memory: hostV1.HostOptions.Memory, - Disk: hostV1.HostOptions.Disk, - EngineOptions: hostV1.HostOptions.EngineOptions, - SwarmOptions: hostV1.HostOptions.SwarmOptions, - AuthOptions: &auth.Options{ - CertDir: filepath.Join(globalStorePath, "certs"), - CaCertPath: hostV1.HostOptions.AuthOptions.CaCertPath, - CaPrivateKeyPath: hostV1.HostOptions.AuthOptions.PrivateKeyPath, - CaCertRemotePath: hostV1.HostOptions.AuthOptions.CaCertRemotePath, - ServerCertPath: hostV1.HostOptions.AuthOptions.ServerCertPath, - ServerKeyPath: hostV1.HostOptions.AuthOptions.ServerKeyPath, - ClientKeyPath: hostV1.HostOptions.AuthOptions.ClientKeyPath, - ServerCertRemotePath: hostV1.HostOptions.AuthOptions.ServerCertRemotePath, - ServerKeyRemotePath: hostV1.HostOptions.AuthOptions.ServerKeyRemotePath, - ClientCertPath: hostV1.HostOptions.AuthOptions.ClientCertPath, - StorePath: globalStorePath, - }, - }, - } - - return h -} diff --git a/vendor/github.com/docker/machine/libmachine/host/migrate_v2_v3.go b/vendor/github.com/docker/machine/libmachine/host/migrate_v2_v3.go deleted file mode 100644 index 12bcbe5f0654..000000000000 --- a/vendor/github.com/docker/machine/libmachine/host/migrate_v2_v3.go +++ /dev/null @@ -1,49 +0,0 @@ -package host - -import ( - "bytes" - "encoding/json" - - "github.com/docker/machine/libmachine/log" -) - -type RawHost struct { - Driver *json.RawMessage -} - -func MigrateHostV2ToHostV3(hostV2 *V2, data []byte, storePath string) *Host { - // Migrate to include RawDriver so that driver plugin will work - // smoothly. 
- rawHost := &RawHost{} - if err := json.Unmarshal(data, &rawHost); err != nil { - log.Warnf("Could not unmarshal raw host for RawDriver information: %s", err) - } - - m := make(map[string]interface{}) - - // Must migrate to include store path in driver since it was not - // previously stored in drivers directly - d := json.NewDecoder(bytes.NewReader(*rawHost.Driver)) - d.UseNumber() - if err := d.Decode(&m); err != nil { - log.Warnf("Could not unmarshal raw host into map[string]interface{}: %s", err) - } - - m["StorePath"] = storePath - - // Now back to []byte - rawDriver, err := json.Marshal(m) - if err != nil { - log.Warnf("Could not re-marshal raw driver: %s", err) - } - - h := &Host{ - ConfigVersion: 2, - DriverName: hostV2.DriverName, - Name: hostV2.Name, - HostOptions: hostV2.HostOptions, - RawDriver: rawDriver, - } - - return h -} diff --git a/vendor/github.com/docker/machine/libmachine/libmachine.go b/vendor/github.com/docker/machine/libmachine/libmachine.go deleted file mode 100644 index 5d0c8fbc8646..000000000000 --- a/vendor/github.com/docker/machine/libmachine/libmachine.go +++ /dev/null @@ -1,188 +0,0 @@ -package libmachine - -import ( - "fmt" - "path/filepath" - - "io" - - "github.com/docker/machine/drivers/errdriver" - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/cert" - "github.com/docker/machine/libmachine/check" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin/localbinary" - "github.com/docker/machine/libmachine/drivers/rpc" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/host" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnerror" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/persist" - "github.com/docker/machine/libmachine/provision" - "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/swarm" - "github.com/docker/machine/libmachine/version" -) - -type API interface { - io.Closer - NewHost(driverName string, rawDriver []byte) (*host.Host, error) - Create(h *host.Host) error - persist.Store - GetMachinesDir() string -} - -type Client struct { - certsDir string - IsDebug bool - SSHClientType ssh.ClientType - GithubAPIToken string - *persist.Filestore - clientDriverFactory rpcdriver.RPCClientDriverFactory -} - -func NewClient(storePath, certsDir string) *Client { - return &Client{ - certsDir: certsDir, - IsDebug: false, - SSHClientType: ssh.External, - Filestore: persist.NewFilestore(storePath, certsDir, certsDir), - clientDriverFactory: rpcdriver.NewRPCClientDriverFactory(), - } -} - -func (api *Client) NewHost(driverName string, rawDriver []byte) (*host.Host, error) { - driver, err := api.clientDriverFactory.NewRPCClientDriver(driverName, rawDriver) - if err != nil { - return nil, err - } - - return &host.Host{ - ConfigVersion: version.ConfigVersion, - Name: driver.GetMachineName(), - Driver: driver, - DriverName: driver.DriverName(), - HostOptions: &host.Options{ - AuthOptions: &auth.Options{ - CertDir: api.certsDir, - CaCertPath: filepath.Join(api.certsDir, "ca.pem"), - CaPrivateKeyPath: filepath.Join(api.certsDir, "ca-key.pem"), - ClientCertPath: filepath.Join(api.certsDir, "cert.pem"), - ClientKeyPath: filepath.Join(api.certsDir, "key.pem"), - ServerCertPath: filepath.Join(api.GetMachinesDir(), "server.pem"), - ServerKeyPath: filepath.Join(api.GetMachinesDir(), 
"server-key.pem"), - }, - EngineOptions: &engine.Options{ - InstallURL: drivers.DefaultEngineInstallURL, - StorageDriver: "overlay2", - TLSVerify: true, - }, - SwarmOptions: &swarm.Options{ - Host: "tcp://0.0.0.0:3376", - Image: "swarm:latest", - Strategy: "spread", - }, - }, - }, nil -} - -func (api *Client) Load(name string) (*host.Host, error) { - h, err := api.Filestore.Load(name) - if err != nil { - return nil, err - } - - d, err := api.clientDriverFactory.NewRPCClientDriver(h.DriverName, h.RawDriver) - if err != nil { - // Not being able to find a driver binary is a "known error" - if _, ok := err.(localbinary.ErrPluginBinaryNotFound); ok { - h.Driver = errdriver.NewDriver(h.DriverName) - return h, nil - } - return nil, err - } - - if h.DriverName == "virtualbox" { - h.Driver = drivers.NewSerialDriver(d) - } else { - h.Driver = d - } - - return h, nil -} - -// Create is the wrapper method which covers all of the boilerplate around -// actually creating, provisioning, and persisting an instance in the store. -func (api *Client) Create(h *host.Host) error { - if err := cert.BootstrapCertificates(h.AuthOptions()); err != nil { - return fmt.Errorf("Error generating certificates: %s", err) - } - - log.Info("Running pre-create checks...") - - if err := h.Driver.PreCreateCheck(); err != nil { - return mcnerror.ErrDuringPreCreate{ - Cause: err, - } - } - - if err := api.Save(h); err != nil { - return fmt.Errorf("Error saving host to store before attempting creation: %s", err) - } - - log.Info("Creating machine...") - - if err := api.performCreate(h); err != nil { - return fmt.Errorf("Error creating machine: %s", err) - } - - log.Debug("Reticulating splines...") - - return nil -} - -func (api *Client) performCreate(h *host.Host) error { - if err := h.Driver.Create(); err != nil { - return fmt.Errorf("Error in driver during machine creation: %s", err) - } - - if err := api.Save(h); err != nil { - return fmt.Errorf("Error saving host to store after attempting creation: %s", err) - } - - // TODO: Not really a fan of just checking "none" or "ci-test" here. 
- if h.Driver.DriverName() == "none" || h.Driver.DriverName() == "ci-test" { - return nil - } - - log.Info("Waiting for machine to be running, this may take a few minutes...") - if err := mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil { - return fmt.Errorf("Error waiting for machine to be running: %s", err) - } - - log.Info("Detecting operating system of created instance...") - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return fmt.Errorf("Error detecting OS: %s", err) - } - - log.Infof("Provisioning with %s...", provisioner.String()) - if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil { - return fmt.Errorf("Error running provisioning: %s", err) - } - - // We should check the connection to docker here - log.Info("Checking connection to Docker...") - if _, _, err = check.DefaultConnChecker.Check(h, false); err != nil { - return fmt.Errorf("Error checking the host: %s", err) - } - - log.Info("Docker is up and running!") - return nil -} - -func (api *Client) Close() error { - return api.clientDriverFactory.Close() -} diff --git a/vendor/github.com/docker/machine/libmachine/log/fmt_machine_logger.go b/vendor/github.com/docker/machine/libmachine/log/fmt_machine_logger.go deleted file mode 100644 index 285e69949fa3..000000000000 --- a/vendor/github.com/docker/machine/libmachine/log/fmt_machine_logger.go +++ /dev/null @@ -1,84 +0,0 @@ -package log - -import ( - "fmt" - "io" - "os" -) - -type FmtMachineLogger struct { - outWriter io.Writer - errWriter io.Writer - debug bool - history *HistoryRecorder -} - -// NewFmtMachineLogger creates a MachineLogger implementation used by the drivers -func NewFmtMachineLogger() MachineLogger { - return &FmtMachineLogger{ - outWriter: os.Stdout, - errWriter: os.Stderr, - debug: false, - history: NewHistoryRecorder(), - } -} - -func (ml *FmtMachineLogger) SetDebug(debug bool) { - ml.debug = debug -} - -func (ml *FmtMachineLogger) SetOutWriter(out io.Writer) { - ml.outWriter = out -} - -func (ml *FmtMachineLogger) SetErrWriter(err io.Writer) { - ml.errWriter = err -} - -func (ml *FmtMachineLogger) Debug(args ...interface{}) { - ml.history.Record(args...) - if ml.debug { - fmt.Fprintln(ml.errWriter, args...) - } -} - -func (ml *FmtMachineLogger) Debugf(fmtString string, args ...interface{}) { - ml.history.Recordf(fmtString, args...) - if ml.debug { - fmt.Fprintf(ml.errWriter, fmtString+"\n", args...) - } -} - -func (ml *FmtMachineLogger) Error(args ...interface{}) { - ml.history.Record(args...) - fmt.Fprintln(ml.errWriter, args...) -} - -func (ml *FmtMachineLogger) Errorf(fmtString string, args ...interface{}) { - ml.history.Recordf(fmtString, args...) - fmt.Fprintf(ml.errWriter, fmtString+"\n", args...) -} - -func (ml *FmtMachineLogger) Info(args ...interface{}) { - ml.history.Record(args...) - fmt.Fprintln(ml.outWriter, args...) -} - -func (ml *FmtMachineLogger) Infof(fmtString string, args ...interface{}) { - ml.history.Recordf(fmtString, args...) - fmt.Fprintf(ml.outWriter, fmtString+"\n", args...) -} - -func (ml *FmtMachineLogger) Warn(args ...interface{}) { - ml.history.Record(args...) - fmt.Fprintln(ml.outWriter, args...) -} - -func (ml *FmtMachineLogger) Warnf(fmtString string, args ...interface{}) { - ml.history.Recordf(fmtString, args...) - fmt.Fprintf(ml.outWriter, fmtString+"\n", args...) 
-} - -func (ml *FmtMachineLogger) History() []string { - return ml.history.records -} diff --git a/vendor/github.com/docker/machine/libmachine/log/history_recorder.go b/vendor/github.com/docker/machine/libmachine/log/history_recorder.go deleted file mode 100644 index defdbad69124..000000000000 --- a/vendor/github.com/docker/machine/libmachine/log/history_recorder.go +++ /dev/null @@ -1,34 +0,0 @@ -package log - -import ( - "fmt" - "sync" -) - -type HistoryRecorder struct { - lock *sync.Mutex - records []string -} - -func NewHistoryRecorder() *HistoryRecorder { - return &HistoryRecorder{ - lock: &sync.Mutex{}, - records: []string{}, - } -} - -func (ml *HistoryRecorder) History() []string { - return ml.records -} - -func (ml *HistoryRecorder) Record(args ...interface{}) { - ml.lock.Lock() - defer ml.lock.Unlock() - ml.records = append(ml.records, fmt.Sprint(args...)) -} - -func (ml *HistoryRecorder) Recordf(fmtString string, args ...interface{}) { - ml.lock.Lock() - defer ml.lock.Unlock() - ml.records = append(ml.records, fmt.Sprintf(fmtString, args...)) -} diff --git a/vendor/github.com/docker/machine/libmachine/log/log.go b/vendor/github.com/docker/machine/libmachine/log/log.go deleted file mode 100644 index 5a5823bc878c..000000000000 --- a/vendor/github.com/docker/machine/libmachine/log/log.go +++ /dev/null @@ -1,74 +0,0 @@ -package log - -import ( - "io" - "regexp" -) - -const redactedText = "" - -var ( - logger = NewFmtMachineLogger() - - // (?s) enables '.' to match '\n' -- see https://golang.org/pkg/regexp/syntax/ - certRegex = regexp.MustCompile("(?s)-----BEGIN CERTIFICATE-----.*-----END CERTIFICATE-----") - keyRegex = regexp.MustCompile("(?s)-----BEGIN RSA PRIVATE KEY-----.*-----END RSA PRIVATE KEY-----") -) - -func stripSecrets(original []string) []string { - stripped := []string{} - for _, line := range original { - line = certRegex.ReplaceAllString(line, redactedText) - line = keyRegex.ReplaceAllString(line, redactedText) - stripped = append(stripped, line) - } - return stripped -} - -func Debug(args ...interface{}) { - logger.Debug(args...) -} - -func Debugf(fmtString string, args ...interface{}) { - logger.Debugf(fmtString, args...) -} - -func Error(args ...interface{}) { - logger.Error(args...) -} - -func Errorf(fmtString string, args ...interface{}) { - logger.Errorf(fmtString, args...) -} - -func Info(args ...interface{}) { - logger.Info(args...) -} - -func Infof(fmtString string, args ...interface{}) { - logger.Infof(fmtString, args...) -} - -func Warn(args ...interface{}) { - logger.Warn(args...) -} - -func Warnf(fmtString string, args ...interface{}) { - logger.Warnf(fmtString, args...) 
-} - -func SetDebug(debug bool) { - logger.SetDebug(debug) -} - -func SetOutWriter(out io.Writer) { - logger.SetOutWriter(out) -} - -func SetErrWriter(err io.Writer) { - logger.SetErrWriter(err) -} - -func History() []string { - return stripSecrets(logger.History()) -} diff --git a/vendor/github.com/docker/machine/libmachine/log/machine_logger.go b/vendor/github.com/docker/machine/libmachine/log/machine_logger.go deleted file mode 100644 index 19f613e230ca..000000000000 --- a/vendor/github.com/docker/machine/libmachine/log/machine_logger.go +++ /dev/null @@ -1,24 +0,0 @@ -package log - -import "io" - -type MachineLogger interface { - SetDebug(debug bool) - - SetOutWriter(io.Writer) - SetErrWriter(io.Writer) - - Debug(args ...interface{}) - Debugf(fmtString string, args ...interface{}) - - Error(args ...interface{}) - Errorf(fmtString string, args ...interface{}) - - Info(args ...interface{}) - Infof(fmtString string, args ...interface{}) - - Warn(args ...interface{}) - Warnf(fmtString string, args ...interface{}) - - History() []string -} diff --git a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_client.go b/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_client.go deleted file mode 100644 index 828dc7614e5d..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_client.go +++ /dev/null @@ -1,47 +0,0 @@ -package mcndockerclient - -import ( - "fmt" - - "github.com/docker/machine/libmachine/cert" - "github.com/samalba/dockerclient" -) - -// DockerClient creates a docker client for a given host. -func DockerClient(dockerHost DockerHost) (*dockerclient.DockerClient, error) { - url, err := dockerHost.URL() - if err != nil { - return nil, err - } - - tlsConfig, err := cert.ReadTLSConfig(url, dockerHost.AuthOptions()) - if err != nil { - return nil, fmt.Errorf("Unable to read TLS config: %s", err) - } - - return dockerclient.NewDockerClient(url, tlsConfig) -} - -// CreateContainer creates a docker container. 
-func CreateContainer(dockerHost DockerHost, config *dockerclient.ContainerConfig, name string) error { - docker, err := DockerClient(dockerHost) - if err != nil { - return err - } - - if err = docker.PullImage(config.Image, nil); err != nil { - return fmt.Errorf("Unable to pull image: %s", err) - } - - var authConfig *dockerclient.AuthConfig - containerID, err := docker.CreateContainer(config, name, authConfig) - if err != nil { - return fmt.Errorf("Error while creating container: %s", err) - } - - if err = docker.StartContainer(containerID, &config.HostConfig); err != nil { - return fmt.Errorf("Error while starting container: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_host.go b/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_host.go deleted file mode 100644 index 43d774edb375..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_host.go +++ /dev/null @@ -1,41 +0,0 @@ -package mcndockerclient - -import ( - "fmt" - - "github.com/docker/machine/libmachine/auth" -) - -type URLer interface { - // URL returns the Docker host URL - URL() (string, error) -} - -type AuthOptionser interface { - // AuthOptions returns the authOptions - AuthOptions() *auth.Options -} - -type DockerHost interface { - URLer - AuthOptionser -} - -type RemoteDocker struct { - HostURL string - AuthOption *auth.Options -} - -// URL returns the Docker host URL -func (rd *RemoteDocker) URL() (string, error) { - if rd.HostURL == "" { - return "", fmt.Errorf("Docker Host URL not set") - } - - return rd.HostURL, nil -} - -// AuthOptions returns the authOptions -func (rd *RemoteDocker) AuthOptions() *auth.Options { - return rd.AuthOption -} diff --git a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_versioner.go b/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_versioner.go deleted file mode 100644 index b8279c777e04..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcndockerclient/docker_versioner.go +++ /dev/null @@ -1,29 +0,0 @@ -package mcndockerclient - -import "fmt" - -var CurrentDockerVersioner DockerVersioner = &defaultDockerVersioner{} - -type DockerVersioner interface { - DockerVersion(host DockerHost) (string, error) -} - -func DockerVersion(host DockerHost) (string, error) { - return CurrentDockerVersioner.DockerVersion(host) -} - -type defaultDockerVersioner struct{} - -func (dv *defaultDockerVersioner) DockerVersion(host DockerHost) (string, error) { - client, err := DockerClient(host) - if err != nil { - return "", fmt.Errorf("Unable to query docker version: %s", err) - } - - version, err := client.Version() - if err != nil { - return "", fmt.Errorf("Unable to query docker version: %s", err) - } - - return version.Version, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/mcndockerclient/fake_docker_versioner.go b/vendor/github.com/docker/machine/libmachine/mcndockerclient/fake_docker_versioner.go deleted file mode 100644 index 32c4bb4cdf82..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcndockerclient/fake_docker_versioner.go +++ /dev/null @@ -1,14 +0,0 @@ -package mcndockerclient - -type FakeDockerVersioner struct { - Version string - Err error -} - -func (dv *FakeDockerVersioner) DockerVersion(host DockerHost) (string, error) { - if dv.Err != nil { - return "", dv.Err - } - - return dv.Version, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/mcnerror/errors.go 
b/vendor/github.com/docker/machine/libmachine/mcnerror/errors.go deleted file mode 100644 index 6efc3c5c477d..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcnerror/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -package mcnerror - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/machine/libmachine/state" -) - -var ( - ErrInvalidHostname = errors.New("Invalid hostname specified. Allowed hostname chars are: 0-9a-zA-Z . -") -) - -type ErrHostDoesNotExist struct { - Name string -} - -func (e ErrHostDoesNotExist) Error() string { - return fmt.Sprintf("Docker machine %q does not exist. Use \"docker-machine ls\" to list machines. Use \"docker-machine create\" to add a new one.", e.Name) -} - -type ErrHostAlreadyExists struct { - Name string -} - -func (e ErrHostAlreadyExists) Error() string { - return fmt.Sprintf("Docker machine %q already exists", e.Name) -} - -type ErrDuringPreCreate struct { - Cause error -} - -func (e ErrDuringPreCreate) Error() string { - return fmt.Sprintf("Error with pre-create check: %q", e.Cause) -} - -type ErrHostAlreadyInState struct { - Name string - State state.State -} - -func (e ErrHostAlreadyInState) Error() string { - return fmt.Sprintf("Machine %q is already %s.", e.Name, strings.ToLower(e.State.String())) -} diff --git a/vendor/github.com/docker/machine/libmachine/mcnflag/flag.go b/vendor/github.com/docker/machine/libmachine/mcnflag/flag.go deleted file mode 100644 index ec24d130aee1..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcnflag/flag.go +++ /dev/null @@ -1,71 +0,0 @@ -package mcnflag - -import "fmt" - -type Flag interface { - fmt.Stringer - Default() interface{} -} - -type StringFlag struct { - Name string - Usage string - EnvVar string - Value string -} - -// TODO: Could this be done more succinctly using embedding? -func (f StringFlag) String() string { - return f.Name -} - -func (f StringFlag) Default() interface{} { - return f.Value -} - -type StringSliceFlag struct { - Name string - Usage string - EnvVar string - Value []string -} - -// TODO: Could this be done more succinctly using embedding? -func (f StringSliceFlag) String() string { - return f.Name -} - -func (f StringSliceFlag) Default() interface{} { - return f.Value -} - -type IntFlag struct { - Name string - Usage string - EnvVar string - Value int -} - -// TODO: Could this be done more succinctly using embedding? -func (f IntFlag) String() string { - return f.Name -} - -func (f IntFlag) Default() interface{} { - return f.Value -} - -type BoolFlag struct { - Name string - Usage string - EnvVar string -} - -// TODO: Could this be done more succinctly using embedding? 
-func (f BoolFlag) String() string { - return f.Name -} - -func (f BoolFlag) Default() interface{} { - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/mcnutils/b2d.go b/vendor/github.com/docker/machine/libmachine/mcnutils/b2d.go deleted file mode 100644 index f2691954224b..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcnutils/b2d.go +++ /dev/null @@ -1,542 +0,0 @@ -package mcnutils - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/version" -) - -const ( - defaultURL = "https://api.github.com/repos/boot2docker/boot2docker/releases" - defaultISOFilename = "boot2docker.iso" - defaultVolumeIDOffset = int64(0x8028) - versionPrefix = "-v" - defaultVolumeIDLength = 32 -) - -var ( - GithubAPIToken string -) - -var ( - errGitHubAPIResponse = errors.New(`failure getting a version tag from the Github API response (are you getting rate limited by Github?)`) -) - -var ( - AUFSBugB2DVersions = map[string]string{ - "v1.9.1": "https://github.com/docker/docker/issues/18180", - } -) - -func defaultTimeout(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -func getClient() *http.Client { - transport := http.Transport{ - DisableKeepAlives: true, - Proxy: http.ProxyFromEnvironment, - Dial: defaultTimeout, - } - - return &http.Client{ - Transport: &transport, - } -} - -func getRequest(apiURL string) (*http.Request, error) { - req, err := http.NewRequest("GET", apiURL, nil) - if err != nil { - return nil, err - } - - if GithubAPIToken != "" { - req.Header.Add("Authorization", fmt.Sprintf("token %s", GithubAPIToken)) - } - - return req, nil -} - -// releaseGetter is a client that gets release information of a product and downloads it. -type releaseGetter interface { - // filename returns filename of the product. - filename() string - // getReleaseTag gets a release tag from the given URL. - getReleaseTag(apiURL string) (string, error) - // getReleaseURL gets the latest release download URL from the given URL. - getReleaseURL(apiURL string) (string, error) - // download downloads a file from the given dlURL and saves it under dir. - download(dir, file, dlURL string) error -} - -// b2dReleaseGetter implements the releaseGetter interface for getting the release of Boot2Docker. -type b2dReleaseGetter struct { - isoFilename string -} - -func (b *b2dReleaseGetter) filename() string { - if b == nil { - return "" - } - return b.isoFilename -} - -// getReleaseTag gets the release tag of Boot2Docker from apiURL. -func (*b2dReleaseGetter) getReleaseTag(apiURL string) (string, error) { - if apiURL == "" { - apiURL = defaultURL - } - - if !version.RC() { - // Just go straight to the convenience URL for "/latest" if we - // are a non-release candidate version. "/latest" won't return - // non-RCs, so that's what we use for stable releases of - // Machine. - apiURL = apiURL + "/latest" - } - - client := getClient() - req, err := getRequest(apiURL) - if err != nil { - return "", err - } - rsp, err := client.Do(req) - if err != nil { - return "", err - } - defer rsp.Body.Close() - - // If we call the API endpoint - // "/repos/boot2docker/boot2docker/releases" without specifying - // "/latest", we will receive a list of releases instead of a single - // one, and we should decode accordingly. 
- if version.RC() { - var tags []struct { - TagName string `json:"tag_name"` - } - if err := json.NewDecoder(rsp.Body).Decode(&tags); err != nil { - return "", err - } - t := tags[0] - if t.TagName == "" { - return "", errGitHubAPIResponse - } - return t.TagName, nil - } - - // Otherwise, we get back just one release, which we can decode to get - // the tag. - var t struct { - TagName string `json:"tag_name"` - } - if err := json.NewDecoder(rsp.Body).Decode(&t); err != nil { - return "", err - } - if t.TagName == "" { - return "", errGitHubAPIResponse - } - return t.TagName, nil -} - -// getReleaseURL gets the latest release URL of Boot2Docker. -func (b *b2dReleaseGetter) getReleaseURL(apiURL string) (string, error) { - if apiURL == "" { - apiURL = defaultURL - } - - // match github (enterprise) release urls: - // https://api.github.com/repos/../../releases or - // https://some.github.enterprise/api/v3/repos/../../releases - re := regexp.MustCompile("(https?)://([^/]+)(/api/v3)?/repos/([^/]+)/([^/]+)/releases") - matches := re.FindStringSubmatch(apiURL) - if len(matches) != 6 { - // does not match a github releases api URL - return apiURL, nil - } - - scheme, host, org, repo := matches[1], matches[2], matches[4], matches[5] - if host == "api.github.com" { - host = "github.com" - } - - tag, err := b.getReleaseTag(apiURL) - if err != nil { - return "", err - } - - log.Infof("Latest release for %s/%s/%s is %s", host, org, repo, tag) - bugURL, ok := AUFSBugB2DVersions[tag] - if ok { - log.Warnf(` -Boot2Docker %s has a known issue with AUFS. -See here for more details: %s -Consider specifying another storage driver (e.g. 'overlay') using '--engine-storage-driver' instead. -`, tag, bugURL) - } - url := fmt.Sprintf("%s://%s/%s/%s/releases/download/%s/%s", scheme, host, org, repo, tag, b.isoFilename) - return url, nil -} - -func (*b2dReleaseGetter) download(dir, file, isoURL string) error { - u, err := url.Parse(isoURL) - - var src io.ReadCloser - if u.Scheme == "file" || u.Scheme == "" { - s, err := os.Open(u.Path) - if err != nil { - return err - } - - src = s - } else { - client := getClient() - s, err := client.Get(isoURL) - if err != nil { - return err - } - - src = &ReaderWithProgress{ - ReadCloser: s.Body, - out: os.Stdout, - expectedLength: s.ContentLength, - } - } - - defer src.Close() - - // Download to a temp file first then rename it to avoid partial download. - f, err := ioutil.TempFile(dir, file+".tmp") - if err != nil { - return err - } - - defer func() { - if err := removeFileIfExists(f.Name()); err != nil { - log.Warnf("Error removing file: %s", err) - } - }() - - if _, err := io.Copy(f, src); err != nil { - return err - } - - if err := f.Close(); err != nil { - return err - } - - // Dest is the final path of the boot2docker.iso file. - dest := filepath.Join(dir, file) - - // Windows can't rename in place, so remove the old file before - // renaming the temporary downloaded file. - if err := removeFileIfExists(dest); err != nil { - return err - } - - return os.Rename(f.Name(), dest) -} - -// iso is an ISO volume. -type iso interface { - // path returns the path of the ISO. - path() string - // exists reports whether the ISO exists. - exists() bool - // version returns version information of the ISO. - version() (string, error) -} - -// b2dISO represents a Boot2Docker ISO. It implements the ISO interface. -type b2dISO struct { - // path of Boot2Docker ISO - commonIsoPath string - - // offset and length of ISO volume ID - // cf. 
http://serverfault.com/questions/361474/is-there-a-way-to-change-a-iso-files-volume-id-from-the-command-line - volumeIDOffset int64 - volumeIDLength int -} - -func (b *b2dISO) path() string { - if b == nil { - return "" - } - return b.commonIsoPath -} - -func (b *b2dISO) exists() bool { - if b == nil { - return false - } - - _, err := os.Stat(b.commonIsoPath) - return !os.IsNotExist(err) -} - -// version scans the volume ID in b and returns its version tag. -func (b *b2dISO) version() (string, error) { - if b == nil { - return "", nil - } - - iso, err := os.Open(b.commonIsoPath) - if err != nil { - return "", err - } - defer iso.Close() - - isoMetadata := make([]byte, b.volumeIDLength) - _, err = iso.ReadAt(isoMetadata, b.volumeIDOffset) - if err != nil { - return "", err - } - - trimmedVersion := strings.TrimSpace(string(isoMetadata)) - - versionIndex := strings.Index(trimmedVersion, versionPrefix) - if versionIndex == -1 { - return "", fmt.Errorf("Did not find prefix %q in version string", versionPrefix) - } - - // Original magic file string looks similar to this: "Boot2Docker-v0.1.0 " - // This will return "v0.1.0" given the above string - vers := trimmedVersion[versionIndex+1:] - - log.Debug("local Boot2Docker ISO version: ", vers) - return vers, nil -} - -func removeFileIfExists(name string) error { - if _, err := os.Stat(name); err == nil { - if err := os.Remove(name); err != nil { - return fmt.Errorf("Error removing temporary download file: %s", err) - } - } - return nil -} - -type B2dUtils struct { - releaseGetter - iso - storePath string - imgCachePath string -} - -func NewB2dUtils(storePath string) *B2dUtils { - imgCachePath := filepath.Join(storePath, "cache") - - return &B2dUtils{ - releaseGetter: &b2dReleaseGetter{isoFilename: defaultISOFilename}, - iso: &b2dISO{ - commonIsoPath: filepath.Join(imgCachePath, defaultISOFilename), - volumeIDOffset: defaultVolumeIDOffset, - volumeIDLength: defaultVolumeIDLength, - }, - storePath: storePath, - imgCachePath: imgCachePath, - } -} - -// DownloadISO downloads boot2docker ISO image for the given tag and save it at dest. 
-func (b *B2dUtils) DownloadISO(dir, file, isoURL string) error { - log.Infof("Downloading %s from %s...", b.path(), isoURL) - return b.download(dir, file, isoURL) -} - -type ReaderWithProgress struct { - io.ReadCloser - out io.Writer - bytesTransferred int64 - expectedLength int64 - nextPercentToPrint int64 -} - -func (r *ReaderWithProgress) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - - if n > 0 { - r.bytesTransferred += int64(n) - percentage := r.bytesTransferred * 100 / r.expectedLength - - for percentage >= r.nextPercentToPrint { - if r.nextPercentToPrint%10 == 0 { - fmt.Fprintf(r.out, "%d%%", r.nextPercentToPrint) - } else if r.nextPercentToPrint%2 == 0 { - fmt.Fprint(r.out, ".") - } - r.nextPercentToPrint += 2 - } - } - - return n, err -} - -func (r *ReaderWithProgress) Close() error { - fmt.Fprintln(r.out) - return r.ReadCloser.Close() -} - -func (b *B2dUtils) DownloadLatestBoot2Docker(apiURL string) error { - latestReleaseURL, err := b.getReleaseURL(apiURL) - if err != nil { - return err - } - - return b.DownloadISOFromURL(latestReleaseURL) -} - -func (b *B2dUtils) DownloadISOFromURL(latestReleaseURL string) error { - return b.DownloadISO(b.imgCachePath, b.filename(), latestReleaseURL) -} - -func (b *B2dUtils) UpdateISOCache(isoURL string) error { - // recreate the cache dir if it has been manually deleted - if _, err := os.Stat(b.imgCachePath); os.IsNotExist(err) { - log.Infof("Image cache directory does not exist, creating it at %s...", b.imgCachePath) - if err := os.Mkdir(b.imgCachePath, 0700); err != nil { - return err - } - } - - exists := b.exists() - - if isoURL != "" { - if exists { - // Warn that the b2d iso won't be updated if isoURL is set - log.Warnf("Boot2Docker URL was explicitly set to %q at create time, so Docker Machine cannot upgrade this machine to the latest version.", isoURL) - } - // Non-default B2D are not cached - return nil - } - - if !exists { - log.Info("No default Boot2Docker ISO found locally, downloading the latest release...") - return b.DownloadLatestBoot2Docker("") - } - - latest := b.isLatest() - if !latest { - log.Info("Default Boot2Docker ISO is out-of-date, downloading the latest release...") - return b.DownloadLatestBoot2Docker("") - } - - return nil -} - -func (b *B2dUtils) CopyIsoToMachineDir(isoURL, machineName string) error { - if err := b.UpdateISOCache(isoURL); err != nil { - return err - } - - // TODO: This is a bit off-color. - machineDir := filepath.Join(b.storePath, "machines", machineName) - machineIsoPath := filepath.Join(machineDir, b.filename()) - - // By default just copy the existing "cached" iso to the machine's directory... - if isoURL == "" { - log.Infof("Copying %s to %s...", b.path(), machineIsoPath) - return CopyFile(b.path(), machineIsoPath) - } - - // if ISO is specified, check if it matches a github releases url or fallback to a direct download - downloadURL, err := b.getReleaseURL(isoURL) - if err != nil { - return err - } - - return b.DownloadISO(machineDir, b.filename(), downloadURL) -} - -// isLatest checks the latest release tag and -// reports whether the local ISO cache is the latest version. -// -// It returns false if failing to get the local ISO version -// and true if failing to fetch the latest release tag. 
-func (b *B2dUtils) isLatest() bool { - localVer, err := b.version() - if err != nil { - log.Warn("Unable to get the local Boot2Docker ISO version: ", err) - return false - } - - latestVer, err := b.getReleaseTag("") - if err != nil { - log.Warn("Unable to get the latest Boot2Docker ISO release version: ", err) - return true - } - - return localVer == latestVer -} - -// MakeDiskImage makes a boot2docker VM disk image. -// See https://github.com/boot2docker/boot2docker/blob/master/rootfs/rootfs/etc/rc.d/automount -func MakeDiskImage(publicSSHKeyPath string) (*bytes.Buffer, error) { - magicString := "boot2docker, please format-me" - - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - - // magicString first so the automount script knows to format the disk - file := &tar.Header{Name: magicString, Size: int64(len(magicString))} - - log.Debug("Writing magic tar header") - - if err := tw.WriteHeader(file); err != nil { - return nil, err - } - - if _, err := tw.Write([]byte(magicString)); err != nil { - return nil, err - } - - // .ssh/key.pub => authorized_keys - file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700} - if err := tw.WriteHeader(file); err != nil { - return nil, err - } - - log.Debug("Writing SSH key tar header") - - pubKey, err := ioutil.ReadFile(publicSSHKeyPath) - if err != nil { - return nil, err - } - - file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644} - if err := tw.WriteHeader(file); err != nil { - return nil, err - } - - if _, err := tw.Write([]byte(pubKey)); err != nil { - return nil, err - } - - file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644} - if err := tw.WriteHeader(file); err != nil { - return nil, err - } - - if _, err := tw.Write([]byte(pubKey)); err != nil { - return nil, err - } - - if err := tw.Close(); err != nil { - return nil, err - } - - return buf, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/mcnutils/utils.go b/vendor/github.com/docker/machine/libmachine/mcnutils/utils.go deleted file mode 100644 index a3992ed1646e..000000000000 --- a/vendor/github.com/docker/machine/libmachine/mcnutils/utils.go +++ /dev/null @@ -1,133 +0,0 @@ -package mcnutils - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "io" - "os" - "runtime" - "strconv" - "time" -) - -type MultiError struct { - Errs []error -} - -func (e MultiError) Error() string { - aggregate := "" - for _, err := range e.Errs { - aggregate += err.Error() + "\n" - } - return aggregate -} - -// GetHomeDir returns the home directory -// TODO: Having this here just strikes me as dangerous, but some of the drivers -// depend on it ;_; -func GetHomeDir() string { - if runtime.GOOS == "windows" { - return os.Getenv("USERPROFILE") - } - return os.Getenv("HOME") -} - -func GetUsername() string { - u := "unknown" - osUser := "" - - switch runtime.GOOS { - case "darwin", "linux": - osUser = os.Getenv("USER") - case "windows": - osUser = os.Getenv("USERNAME") - } - - if osUser != "" { - u = osUser - } - - return u -} - -func CopyFile(src, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return err - } - - defer out.Close() - - if _, err = io.Copy(out, in); err != nil { - return err - } - - fi, err := os.Stat(src) - if err != nil { - return err - } - - return os.Chmod(dst, fi.Mode()) -} - -func WaitForSpecificOrError(f func() (bool, error), maxAttempts int, waitInterval time.Duration) error { - for i := 0; i 
< maxAttempts; i++ { - stop, err := f() - if err != nil { - return err - } - if stop { - return nil - } - time.Sleep(waitInterval) - } - return fmt.Errorf("Maximum number of retries (%d) exceeded", maxAttempts) -} - -func WaitForSpecific(f func() bool, maxAttempts int, waitInterval time.Duration) error { - return WaitForSpecificOrError(func() (bool, error) { - return f(), nil - }, maxAttempts, waitInterval) -} - -func WaitFor(f func() bool) error { - return WaitForSpecific(f, 60, 3*time.Second) -} - -// TruncateID returns a shorten id -// Following two functions are from github.com/docker/docker/utils module. It -// was way overkill to include the whole module, so we just have these bits -// that we're using here. -func TruncateID(id string) string { - shortLen := 12 - if len(id) < shortLen { - shortLen = len(id) - } - return id[:shortLen] -} - -// GenerateRandomID returns an unique id -func GenerateRandomID() string { - for { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - value := hex.EncodeToString(id) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { - continue - } - return value - } -} diff --git a/vendor/github.com/docker/machine/libmachine/persist/filestore.go b/vendor/github.com/docker/machine/libmachine/persist/filestore.go deleted file mode 100644 index ea79f44a5f64..000000000000 --- a/vendor/github.com/docker/machine/libmachine/persist/filestore.go +++ /dev/null @@ -1,161 +0,0 @@ -package persist - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/machine/libmachine/host" - "github.com/docker/machine/libmachine/mcnerror" -) - -type Filestore struct { - Path string - CaCertPath string - CaPrivateKeyPath string -} - -func NewFilestore(path, caCertPath, caPrivateKeyPath string) *Filestore { - return &Filestore{ - Path: path, - CaCertPath: caCertPath, - CaPrivateKeyPath: caPrivateKeyPath, - } -} - -func (s Filestore) GetMachinesDir() string { - return filepath.Join(s.Path, "machines") -} - -func (s Filestore) saveToFile(data []byte, file string) error { - if _, err := os.Stat(file); os.IsNotExist(err) { - return ioutil.WriteFile(file, data, 0600) - } - - tmpfi, err := ioutil.TempFile(filepath.Dir(file), "config.json.tmp") - if err != nil { - return err - } - defer os.Remove(tmpfi.Name()) - - if err = ioutil.WriteFile(tmpfi.Name(), data, 0600); err != nil { - return err - } - - if err = tmpfi.Close(); err != nil { - return err - } - - if err = os.Remove(file); err != nil { - return err - } - - err = os.Rename(tmpfi.Name(), file) - return err -} - -func (s Filestore) Save(host *host.Host) error { - data, err := json.MarshalIndent(host, "", " ") - if err != nil { - return err - } - - hostPath := filepath.Join(s.GetMachinesDir(), host.Name) - - // Ensure that the directory we want to save to exists. 
- if err := os.MkdirAll(hostPath, 0700); err != nil { - return err - } - - return s.saveToFile(data, filepath.Join(hostPath, "config.json")) -} - -func (s Filestore) Remove(name string) error { - hostPath := filepath.Join(s.GetMachinesDir(), name) - return os.RemoveAll(hostPath) -} - -func (s Filestore) List() ([]string, error) { - dir, err := ioutil.ReadDir(s.GetMachinesDir()) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - hostNames := []string{} - - for _, file := range dir { - if file.IsDir() && !strings.HasPrefix(file.Name(), ".") { - hostNames = append(hostNames, file.Name()) - } - } - - return hostNames, nil -} - -func (s Filestore) Exists(name string) (bool, error) { - _, err := os.Stat(filepath.Join(s.GetMachinesDir(), name)) - - if os.IsNotExist(err) { - return false, nil - } else if err == nil { - return true, nil - } - - return false, err -} - -func (s Filestore) loadConfig(h *host.Host) error { - data, err := ioutil.ReadFile(filepath.Join(s.GetMachinesDir(), h.Name, "config.json")) - if err != nil { - return err - } - - // Remember the machine name so we don't have to pass it through each - // struct in the migration. - name := h.Name - - migratedHost, migrationPerformed, err := host.MigrateHost(h, data) - if err != nil { - return fmt.Errorf("Error getting migrated host: %s", err) - } - - *h = *migratedHost - - h.Name = name - - // If we end up performing a migration, we should save afterwards so we don't have to do it again on subsequent invocations. - if migrationPerformed { - if err := s.saveToFile(data, filepath.Join(s.GetMachinesDir(), h.Name, "config.json.bak")); err != nil { - return fmt.Errorf("Error attempting to save backup after migration: %s", err) - } - - if err := s.Save(h); err != nil { - return fmt.Errorf("Error saving config after migration was performed: %s", err) - } - } - - return nil -} - -func (s Filestore) Load(name string) (*host.Host, error) { - hostPath := filepath.Join(s.GetMachinesDir(), name) - - if _, err := os.Stat(hostPath); os.IsNotExist(err) { - return nil, mcnerror.ErrHostDoesNotExist{ - Name: name, - } - } - - host := &host.Host{ - Name: name, - } - - if err := s.loadConfig(host); err != nil { - return nil, err - } - - return host, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/persist/store.go b/vendor/github.com/docker/machine/libmachine/persist/store.go deleted file mode 100644 index 1d9d489bfdc6..000000000000 --- a/vendor/github.com/docker/machine/libmachine/persist/store.go +++ /dev/null @@ -1,47 +0,0 @@ -package persist - -import ( - "github.com/docker/machine/libmachine/host" -) - -type Store interface { - // Exists returns whether a machine exists or not - Exists(name string) (bool, error) - - // List returns a list of all hosts in the store - List() ([]string, error) - - // Load loads a host by name - Load(name string) (*host.Host, error) - - // Remove removes a machine from the store - Remove(name string) error - - // Save persists a machine in the store - Save(host *host.Host) error -} - -func LoadHosts(s Store, hostNames []string) ([]*host.Host, map[string]error) { - loadedHosts := []*host.Host{} - errors := map[string]error{} - - for _, hostName := range hostNames { - h, err := s.Load(hostName) - if err != nil { - errors[hostName] = err - } else { - loadedHosts = append(loadedHosts, h) - } - } - - return loadedHosts, errors -} - -func LoadAllHosts(s Store) ([]*host.Host, map[string]error, error) { - hostNames, err := s.List() - if err != nil { - return nil, nil, err - } - loadedHosts, 
hostInError := LoadHosts(s, hostNames) - return loadedHosts, hostInError, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/arch.go b/vendor/github.com/docker/machine/libmachine/provision/arch.go deleted file mode 100644 index 6a7399830175..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/arch.go +++ /dev/null @@ -1,152 +0,0 @@ -package provision - -import ( - "fmt" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("Arch", &RegisteredProvisioner{ - New: NewArchProvisioner, - }) -} - -func NewArchProvisioner(d drivers.Driver) Provisioner { - return &ArchProvisioner{ - NewSystemdProvisioner("arch", d), - } -} - -type ArchProvisioner struct { - SystemdProvisioner -} - -func (provisioner *ArchProvisioner) String() string { - return "arch" -} - -func (provisioner *ArchProvisioner) CompatibleWithHost() bool { - return provisioner.OsReleaseInfo.ID == provisioner.OsReleaseID || provisioner.OsReleaseInfo.IDLike == provisioner.OsReleaseID -} - -func (provisioner *ArchProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - updateMetadata := true - - switch action { - case pkgaction.Install, pkgaction.Upgrade: - packageAction = "S" - case pkgaction.Remove: - packageAction = "R" - updateMetadata = false - } - - switch name { - case "docker-engine": - name = "docker" - case "docker": - name = "docker" - } - - pacmanOpts := "-" + packageAction - if updateMetadata { - pacmanOpts = pacmanOpts + "y" - } - - pacmanOpts = pacmanOpts + " --noconfirm --noprogressbar" - - command := fmt.Sprintf("sudo -E pacman %s %s", pacmanOpts, name) - - log.Debugf("package: action=%s name=%s", action.String(), name) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func (provisioner *ArchProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. - return true -} - -func (provisioner *ArchProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - storageDriver, err := decideStorageDriver(provisioner, "overlay", engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - // HACK: since Arch does not come with sudo by default we install - log.Debug("Installing sudo") - if _, err := provisioner.SSHCommand("if ! 
type sudo; then pacman -Sy --noconfirm --noprogressbar sudo; fi"); err != nil { - return err - } - - log.Debug("Setting hostname") - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - log.Debug("Installing base packages") - for _, pkg := range provisioner.Packages { - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - log.Debug("Installing docker") - if err := provisioner.Package("docker", pkgaction.Install); err != nil { - return err - } - - log.Debug("Starting systemd docker service") - if err := provisioner.Service("docker", serviceaction.Start); err != nil { - return err - } - - log.Debug("Waiting for docker daemon") - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debug("Configuring auth") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debug("Configuring swarm") - if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { - return err - } - - // enable in systemd - log.Debug("Enabling docker in systemd") - err = provisioner.Service("docker", serviceaction.Enable) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go b/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go deleted file mode 100644 index ebe3b523ada3..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go +++ /dev/null @@ -1,268 +0,0 @@ -package provision - -import ( - "bytes" - "encoding/json" - "fmt" - "net" - "path" - "text/template" - "time" - - "github.com/docker/machine/commands/mcndirs" - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("boot2docker", &RegisteredProvisioner{ - New: NewBoot2DockerProvisioner, - }) -} - -func NewBoot2DockerProvisioner(d drivers.Driver) Provisioner { - return &Boot2DockerProvisioner{ - Driver: d, - } -} - -type Boot2DockerProvisioner struct { - OsReleaseInfo *OsRelease - Driver drivers.Driver - AuthOptions auth.Options - EngineOptions engine.Options - SwarmOptions swarm.Options -} - -func (provisioner *Boot2DockerProvisioner) String() string { - return "boot2docker" -} - -func (provisioner *Boot2DockerProvisioner) Service(name string, action serviceaction.ServiceAction) error { - _, err := provisioner.SSHCommand(fmt.Sprintf("sudo /etc/init.d/%s %s", name, action.String())) - return err -} - -func (provisioner *Boot2DockerProvisioner) upgradeIso() error { - // TODO: Ideally, we should not read from mcndirs directory at all. - // The driver should be able to communicate how and where to place the - // relevant files. 
- b2dutils := mcnutils.NewB2dUtils(mcndirs.GetBaseDir()) - - // Check if the driver has specified a custom b2d url - jsonDriver, err := json.Marshal(provisioner.GetDriver()) - if err != nil { - return err - } - var d struct { - Boot2DockerURL string - } - json.Unmarshal(jsonDriver, &d) - - log.Info("Stopping machine to do the upgrade...") - - if err := provisioner.Driver.Stop(); err != nil { - return err - } - - if err := mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil { - return err - } - - machineName := provisioner.GetDriver().GetMachineName() - - log.Infof("Upgrading machine %q...", machineName) - - // Either download the latest version of the b2d url that was explicitly - // specified when creating the VM or copy the (updated) default ISO - if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, machineName); err != nil { - return err - } - - log.Infof("Starting machine back up...") - - if err := provisioner.Driver.Start(); err != nil { - return err - } - - return mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running)) -} - -func (provisioner *Boot2DockerProvisioner) Package(name string, action pkgaction.PackageAction) error { - if name == "docker" && action == pkgaction.Upgrade { - if err := provisioner.upgradeIso(); err != nil { - return err - } - } - return nil -} - -func (provisioner *Boot2DockerProvisioner) Hostname() (string, error) { - return provisioner.SSHCommand("hostname") -} - -func (provisioner *Boot2DockerProvisioner) SetHostname(hostname string) error { - if _, err := provisioner.SSHCommand(fmt.Sprintf( - "sudo /usr/bin/sethostname %s && echo %q | sudo tee /var/lib/boot2docker/etc/hostname", - hostname, - hostname, - )); err != nil { - return err - } - - return nil -} - -func (provisioner *Boot2DockerProvisioner) GetDockerOptionsDir() string { - return "/var/lib/boot2docker" -} - -func (provisioner *Boot2DockerProvisioner) GetAuthOptions() auth.Options { - return provisioner.AuthOptions -} - -func (provisioner *Boot2DockerProvisioner) GetSwarmOptions() swarm.Options { - return provisioner.SwarmOptions -} - -func (provisioner *Boot2DockerProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - var ( - engineCfg bytes.Buffer - ) - - driverNameLabel := fmt.Sprintf("provider=%s", provisioner.Driver.DriverName()) - provisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel) - - engineConfigTmpl := ` -EXTRA_ARGS=' -{{ range .EngineOptions.Labels }}--label {{.}} -{{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} -{{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} -{{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} -{{ end }} -' -CACERT={{.AuthOptions.CaCertRemotePath}} -DOCKER_HOST='-H tcp://0.0.0.0:{{.DockerPort}}' -DOCKER_STORAGE={{.EngineOptions.StorageDriver}} -DOCKER_TLS=auto -SERVERKEY={{.AuthOptions.ServerKeyRemotePath}} -SERVERCERT={{.AuthOptions.ServerCertRemotePath}} - -{{range .EngineOptions.Env}}export \"{{ printf "%q" . 
}}\" -{{end}} -` - t, err := template.New("engineConfig").Parse(engineConfigTmpl) - if err != nil { - return nil, err - } - - engineConfigContext := EngineConfigContext{ - DockerPort: dockerPort, - AuthOptions: provisioner.AuthOptions, - EngineOptions: provisioner.EngineOptions, - } - - t.Execute(&engineCfg, engineConfigContext) - - daemonOptsDir := path.Join(provisioner.GetDockerOptionsDir(), "profile") - return &DockerOptions{ - EngineOptions: engineCfg.String(), - EngineOptionsPath: daemonOptsDir, - }, nil -} - -func (provisioner *Boot2DockerProvisioner) CompatibleWithHost() bool { - return provisioner.OsReleaseInfo.ID == "boot2docker" -} - -func (provisioner *Boot2DockerProvisioner) SetOsReleaseInfo(info *OsRelease) { - provisioner.OsReleaseInfo = info -} - -func (provisioner *Boot2DockerProvisioner) GetOsReleaseInfo() (*OsRelease, error) { - return provisioner.OsReleaseInfo, nil -} - -func (provisioner *Boot2DockerProvisioner) AttemptIPContact(dockerPort int) { - ip, err := provisioner.Driver.GetIP() - if err != nil { - log.Warnf("Could not get IP address for created machine: %s", err) - return - } - - if conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, dockerPort), 5*time.Second); err != nil { - log.Warnf(` -This machine has been allocated an IP address, but Docker Machine could not -reach it successfully. - -SSH for the machine should still work, but connecting to exposed ports, such as -the Docker daemon port (usually :%d), may not work properly. - -You may need to add the route manually, or use another related workaround. - -This could be due to a VPN, proxy, or host file configuration issue. - -You also might want to clear any VirtualBox host only interfaces you are not using.`, engine.DefaultPort) - } else { - conn.Close() - } -} - -func (provisioner *Boot2DockerProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - var ( - err error - ) - - defer func() { - if err == nil { - provisioner.AttemptIPContact(engine.DefaultPort) - } - }() - - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - if provisioner.EngineOptions.StorageDriver == "" { - provisioner.EngineOptions.StorageDriver = "overlay2" - } - - if err = provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - // b2d hosts need to wait for the daemon to be up - // before continuing with provisioning - if err = WaitForDocker(provisioner, engine.DefaultPort); err != nil { - return err - } - - if err = makeDockerOptionsDir(provisioner); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - if err = ConfigureAuth(provisioner); err != nil { - return err - } - - err = configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions) - return err -} - -func (provisioner *Boot2DockerProvisioner) SSHCommand(args string) (string, error) { - return drivers.RunSSHCommandFromDriver(provisioner.Driver, args) -} - -func (provisioner *Boot2DockerProvisioner) GetDriver() drivers.Driver { - return provisioner.Driver -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/centos.go b/vendor/github.com/docker/machine/libmachine/provision/centos.go deleted file mode 100644 index 11c01a3953de..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/centos.go +++ /dev/null @@ -1,25 +0,0 @@ -package provision - -import ( - 
"github.com/docker/machine/libmachine/drivers" -) - -func init() { - Register("Centos", &RegisteredProvisioner{ - New: NewCentosProvisioner, - }) -} - -func NewCentosProvisioner(d drivers.Driver) Provisioner { - return &CentosProvisioner{ - NewRedHatProvisioner("centos", d), - } -} - -type CentosProvisioner struct { - *RedHatProvisioner -} - -func (provisioner *CentosProvisioner) String() string { - return "centos" -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/configure_swarm.go b/vendor/github.com/docker/machine/libmachine/provision/configure_swarm.go deleted file mode 100644 index 5d7545caecad..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/configure_swarm.go +++ /dev/null @@ -1,149 +0,0 @@ -package provision - -import ( - "fmt" - "net/url" - "strconv" - "strings" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcndockerclient" - "github.com/docker/machine/libmachine/swarm" - "github.com/samalba/dockerclient" -) - -func configureSwarm(p Provisioner, swarmOptions swarm.Options, authOptions auth.Options) error { - if !swarmOptions.IsSwarm { - return nil - } - - log.Info("Configuring swarm...") - - ip, err := p.GetDriver().GetIP() - if err != nil { - return err - } - - u, err := url.Parse(swarmOptions.Host) - if err != nil { - return err - } - - enginePort := engine.DefaultPort - engineURL, err := p.GetDriver().GetURL() - if err != nil { - return err - } - - parts := strings.Split(engineURL, ":") - if len(parts) == 3 { - dPort, err := strconv.Atoi(parts[2]) - if err != nil { - return err - } - enginePort = dPort - } - - parts = strings.Split(u.Host, ":") - port := parts[1] - - dockerDir := p.GetDockerOptionsDir() - dockerHost := &mcndockerclient.RemoteDocker{ - HostURL: fmt.Sprintf("tcp://%s:%d", ip, enginePort), - AuthOption: &authOptions, - } - advertiseInfo := fmt.Sprintf("%s:%d", ip, enginePort) - - if swarmOptions.Master { - advertiseMasterInfo := fmt.Sprintf("%s:%s", ip, "3376") - cmd := fmt.Sprintf("manage --tlsverify --tlscacert=%s --tlscert=%s --tlskey=%s -H %s --strategy %s --advertise %s", - authOptions.CaCertRemotePath, - authOptions.ServerCertRemotePath, - authOptions.ServerKeyRemotePath, - swarmOptions.Host, - swarmOptions.Strategy, - advertiseMasterInfo, - ) - if swarmOptions.IsExperimental { - cmd = "--experimental " + cmd - } - - cmdMaster := strings.Fields(cmd) - for _, option := range swarmOptions.ArbitraryFlags { - cmdMaster = append(cmdMaster, "--"+option) - } - - //Discovery must be at end of command - cmdMaster = append(cmdMaster, swarmOptions.Discovery) - - hostBind := fmt.Sprintf("%s:%s", dockerDir, dockerDir) - masterHostConfig := dockerclient.HostConfig{ - RestartPolicy: dockerclient.RestartPolicy{ - Name: "always", - MaximumRetryCount: 0, - }, - Binds: []string{hostBind}, - PortBindings: map[string][]dockerclient.PortBinding{ - fmt.Sprintf("%s/tcp", port): { - { - HostIp: "0.0.0.0", - HostPort: port, - }, - }, - }, - } - - swarmMasterConfig := &dockerclient.ContainerConfig{ - Image: swarmOptions.Image, - Env: swarmOptions.Env, - ExposedPorts: map[string]struct{}{ - "2375/tcp": {}, - fmt.Sprintf("%s/tcp", port): {}, - }, - Cmd: cmdMaster, - HostConfig: masterHostConfig, - } - - err = mcndockerclient.CreateContainer(dockerHost, swarmMasterConfig, "swarm-agent-master") - if err != nil { - return err - } - } - - if swarmOptions.Agent { - workerHostConfig := dockerclient.HostConfig{ - 
RestartPolicy: dockerclient.RestartPolicy{ - Name: "always", - MaximumRetryCount: 0, - }, - } - - cmdWorker := []string{ - "join", - "--advertise", - advertiseInfo, - } - for _, option := range swarmOptions.ArbitraryJoinFlags { - cmdWorker = append(cmdWorker, "--"+option) - } - cmdWorker = append(cmdWorker, swarmOptions.Discovery) - - swarmWorkerConfig := &dockerclient.ContainerConfig{ - Image: swarmOptions.Image, - Env: swarmOptions.Env, - Cmd: cmdWorker, - HostConfig: workerHostConfig, - } - if swarmOptions.IsExperimental { - swarmWorkerConfig.Cmd = append([]string{"--experimental"}, swarmWorkerConfig.Cmd...) - } - - err = mcndockerclient.CreateContainer(dockerHost, swarmWorkerConfig, "swarm-agent") - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/coreos.go b/vendor/github.com/docker/machine/libmachine/provision/coreos.go deleted file mode 100644 index bbb798b70c4c..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/coreos.go +++ /dev/null @@ -1,132 +0,0 @@ -package provision - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/swarm" - "github.com/docker/machine/libmachine/versioncmp" -) - -const ( - hostTmpl = `sudo tee /var/tmp/hostname.yml << EOF -#cloud-config - -hostname: %s -EOF -` -) - -func init() { - Register("CoreOS", &RegisteredProvisioner{ - New: NewCoreOSProvisioner, - }) -} - -func NewCoreOSProvisioner(d drivers.Driver) Provisioner { - return &CoreOSProvisioner{ - NewSystemdProvisioner("coreos", d), - } -} - -type CoreOSProvisioner struct { - SystemdProvisioner -} - -func (provisioner *CoreOSProvisioner) String() string { - return "coreOS" -} - -func (provisioner *CoreOSProvisioner) SetHostname(hostname string) error { - log.Debugf("SetHostname: %s", hostname) - - if _, err := provisioner.SSHCommand(fmt.Sprintf(hostTmpl, hostname)); err != nil { - return err - } - - if _, err := provisioner.SSHCommand("sudo systemctl start system-cloudinit@var-tmp-hostname.yml.service"); err != nil { - return err - } - - return nil -} - -func (provisioner *CoreOSProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - var ( - engineCfg bytes.Buffer - ) - - driverNameLabel := fmt.Sprintf("provider=%s", provisioner.Driver.DriverName()) - provisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel) - - dockerVersion, err := DockerClientVersion(provisioner) - if err != nil { - return nil, err - } - - arg := "daemon" - if versioncmp.GreaterThanOrEqualTo(dockerVersion, "1.12.0") { - arg = "" - } - - engineConfigTmpl := `[Service] -Environment=TMPDIR=/var/tmp -ExecStart= -ExecStart=/usr/lib/coreos/dockerd ` + arg + ` --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:{{.DockerPort}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}}{{ range .EngineOptions.Labels }} --label {{.}}{{ end }}{{ range .EngineOptions.InsecureRegistry }} --insecure-registry {{.}}{{ end }}{{ range .EngineOptions.RegistryMirror }} --registry-mirror {{.}}{{ end }}{{ range .EngineOptions.ArbitraryFlags }} --{{.}}{{ end }} \$DOCKER_OPTS \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPT_IPMASQ 
-Environment={{range .EngineOptions.Env}}{{ printf "%q" . }} {{end}} -` - - t, err := template.New("engineConfig").Parse(engineConfigTmpl) - if err != nil { - return nil, err - } - - engineConfigContext := EngineConfigContext{ - DockerPort: dockerPort, - AuthOptions: provisioner.AuthOptions, - EngineOptions: provisioner.EngineOptions, - } - - t.Execute(&engineCfg, engineConfigContext) - - return &DockerOptions{ - EngineOptions: engineCfg.String(), - EngineOptionsPath: provisioner.DaemonOptionsFile, - }, nil -} - -func (provisioner *CoreOSProvisioner) Package(name string, action pkgaction.PackageAction) error { - return nil -} - -func (provisioner *CoreOSProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - if err := makeDockerOptionsDir(provisioner); err != nil { - return err - } - - log.Debugf("Preparing certificates") - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debugf("Setting up certificates") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debug("Configuring swarm") - err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/debian.go b/vendor/github.com/docker/machine/libmachine/provision/debian.go deleted file mode 100644 index ce8bcc5e1763..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/debian.go +++ /dev/null @@ -1,139 +0,0 @@ -package provision - -import ( - "fmt" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("Debian", &RegisteredProvisioner{ - New: NewDebianProvisioner, - }) -} - -func NewDebianProvisioner(d drivers.Driver) Provisioner { - return &DebianProvisioner{ - NewSystemdProvisioner("debian", d), - } -} - -type DebianProvisioner struct { - SystemdProvisioner -} - -func (provisioner *DebianProvisioner) String() string { - return "debian" -} - -func (provisioner *DebianProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - updateMetadata := true - - switch action { - case pkgaction.Install, pkgaction.Upgrade: - packageAction = "install" - case pkgaction.Remove: - packageAction = "remove" - updateMetadata = false - case pkgaction.Purge: - packageAction = "purge" - updateMetadata = false - } - - switch name { - case "docker": - name = "docker-engine" - } - - if updateMetadata { - if err := waitForLockAptGetUpdate(provisioner); err != nil { - return err - } - } - - command := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y %s", packageAction, name) - - log.Debugf("package: action=%s name=%s", action.String(), name) - - return waitForLock(provisioner, command) -} - -func (provisioner *DebianProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - 
log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. - return true -} - -func (provisioner *DebianProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - // HACK: since debian does not come with sudo by default we install - log.Debug("installing sudo") - if _, err := provisioner.SSHCommand("if ! type sudo; then apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo; fi"); err != nil { - return err - } - - log.Debug("setting hostname") - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - log.Debug("installing base packages") - for _, pkg := range provisioner.Packages { - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - log.Debug("installing docker") - if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { - return err - } - - log.Debug("waiting for docker daemon") - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debug("configuring auth") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debug("configuring swarm") - if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { - return err - } - - // enable in systemd - log.Debug("enabling docker in systemd") - err = provisioner.Service("docker", serviceaction.Enable) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/engine_config_context.go b/vendor/github.com/docker/machine/libmachine/provision/engine_config_context.go deleted file mode 100644 index 7a7e9a8082e0..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/engine_config_context.go +++ /dev/null @@ -1,13 +0,0 @@ -package provision - -import ( - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/engine" -) - -type EngineConfigContext struct { - DockerPort int - AuthOptions auth.Options - EngineOptions engine.Options - DockerOptionsDir string -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/errors.go b/vendor/github.com/docker/machine/libmachine/provision/errors.go deleted file mode 100644 index 46348b90ebb6..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/errors.go +++ /dev/null @@ -1,24 +0,0 @@ -package provision - -import ( - "errors" - "fmt" -) - -var ( - ErrDetectionFailed = errors.New("OS type not recognized") -) - -type ErrDaemonAvailable struct { - wrappedErr error -} - -func (e ErrDaemonAvailable) Error() string { - return fmt.Sprintf("Unable to verify the Docker daemon is listening: %s", e.wrappedErr) -} - -func NewErrDaemonAvailable(err error) ErrDaemonAvailable { - return ErrDaemonAvailable{ - wrappedErr: err, - } -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/fake_provisioner.go 
b/vendor/github.com/docker/machine/libmachine/provision/fake_provisioner.go deleted file mode 100644 index 7d4caaed9a43..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/fake_provisioner.go +++ /dev/null @@ -1,110 +0,0 @@ -package provision - -import ( - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -type FakeDetector struct { - Provisioner -} - -func (fd *FakeDetector) DetectProvisioner(d drivers.Driver) (Provisioner, error) { - return fd.Provisioner, nil -} - -type FakeProvisioner struct{} - -func NewFakeProvisioner(d drivers.Driver) Provisioner { - return &FakeProvisioner{} -} - -func (fp *FakeProvisioner) SSHCommand(args string) (string, error) { - return "", nil -} - -func (fp *FakeProvisioner) String() string { - return "fakeprovisioner" -} - -func (fp *FakeProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - return nil, nil -} - -func (fp *FakeProvisioner) GetDockerOptionsDir() string { - return "" -} - -func (fp *FakeProvisioner) GetAuthOptions() auth.Options { - return auth.Options{} -} - -func (fp *FakeProvisioner) GetSwarmOptions() swarm.Options { - return swarm.Options{} -} - -func (fp *FakeProvisioner) Package(name string, action pkgaction.PackageAction) error { - return nil -} - -func (fp *FakeProvisioner) Hostname() (string, error) { - return "", nil -} - -func (fp *FakeProvisioner) SetHostname(hostname string) error { - return nil -} - -func (fp *FakeProvisioner) CompatibleWithHost() bool { - return true -} - -func (fp *FakeProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - return nil -} - -func (fp *FakeProvisioner) Service(name string, action serviceaction.ServiceAction) error { - return nil -} - -func (fp *FakeProvisioner) GetDriver() drivers.Driver { - return nil -} - -func (fp *FakeProvisioner) SetOsReleaseInfo(info *OsRelease) {} - -func (fp *FakeProvisioner) GetOsReleaseInfo() (*OsRelease, error) { - return nil, nil -} - -type NetstatProvisioner struct { - *FakeProvisioner -} - -func (p *NetstatProvisioner) SSHCommand(args string) (string, error) { - return `Active Internet connections (servers and established) -Proto Recv-Q Send-Q Local Address Foreign Address State -tcp 0 0 0.0.0.0:ssh 0.0.0.0:* LISTEN -tcp 0 72 192.168.25.141:ssh 192.168.25.1:63235 ESTABLISHED -tcp 0 0 :::2376 :::* LISTEN -tcp 0 0 :::ssh :::* LISTEN -Active UNIX domain sockets (servers and established) -Proto RefCnt Flags Type State I-Node Path -unix 2 [ ACC ] STREAM LISTENING 17990 /var/run/acpid.socket -unix 2 [ ACC ] SEQPACKET LISTENING 14233 /run/udev/control -unix 2 [ ACC ] STREAM LISTENING 19365 /var/run/docker.sock -unix 3 [ ] STREAM CONNECTED 19774 -unix 3 [ ] STREAM CONNECTED 19775 -unix 3 [ ] DGRAM 14243 -unix 3 [ ] DGRAM 14242`, nil -} - -func NewNetstatProvisioner() Provisioner { - return &NetstatProvisioner{ - &FakeProvisioner{}, - } -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/fedora.go b/vendor/github.com/docker/machine/libmachine/provision/fedora.go deleted file mode 100644 index 71989c02a201..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/fedora.go +++ /dev/null @@ -1,25 +0,0 @@ -package provision - -import ( - 
"github.com/docker/machine/libmachine/drivers" -) - -func init() { - Register("Fedora", &RegisteredProvisioner{ - New: NewFedoraProvisioner, - }) -} - -func NewFedoraProvisioner(d drivers.Driver) Provisioner { - return &FedoraProvisioner{ - NewRedHatProvisioner("fedora", d), - } -} - -type FedoraProvisioner struct { - *RedHatProvisioner -} - -func (provisioner *FedoraProvisioner) String() string { - return "fedora" -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/generic.go b/vendor/github.com/docker/machine/libmachine/provision/generic.go deleted file mode 100644 index 24ad1b559991..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/generic.go +++ /dev/null @@ -1,138 +0,0 @@ -package provision - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/swarm" -) - -type GenericProvisioner struct { - SSHCommander - OsReleaseID string - DockerOptionsDir string - DaemonOptionsFile string - Packages []string - OsReleaseInfo *OsRelease - Driver drivers.Driver - AuthOptions auth.Options - EngineOptions engine.Options - SwarmOptions swarm.Options -} - -type GenericSSHCommander struct { - Driver drivers.Driver -} - -func (sshCmder GenericSSHCommander) SSHCommand(args string) (string, error) { - return drivers.RunSSHCommandFromDriver(sshCmder.Driver, args) -} - -func (provisioner *GenericProvisioner) Hostname() (string, error) { - return provisioner.SSHCommand("hostname") -} - -func (provisioner *GenericProvisioner) SetHostname(hostname string) error { - if _, err := provisioner.SSHCommand(fmt.Sprintf( - "sudo hostname %s && echo %q | sudo tee /etc/hostname", - hostname, - hostname, - )); err != nil { - return err - } - - // ubuntu/debian use 127.0.1.1 for non "localhost" loopback hostnames: https://www.debian.org/doc/manuals/debian-reference/ch05.en.html#_the_hostname_resolution - if _, err := provisioner.SSHCommand(fmt.Sprintf(` - if ! 
grep -xq '.*\s%s' /etc/hosts; then - if grep -xq '127.0.1.1\s.*' /etc/hosts; then - sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 %s/g' /etc/hosts; - else - echo '127.0.1.1 %s' | sudo tee -a /etc/hosts; - fi - fi`, - hostname, - hostname, - hostname, - )); err != nil { - return err - } - - return nil -} - -func (provisioner *GenericProvisioner) GetDockerOptionsDir() string { - return provisioner.DockerOptionsDir -} - -func (provisioner *GenericProvisioner) CompatibleWithHost() bool { - return provisioner.OsReleaseInfo.ID == provisioner.OsReleaseID -} - -func (provisioner *GenericProvisioner) GetAuthOptions() auth.Options { - return provisioner.AuthOptions -} - -func (provisioner *GenericProvisioner) GetSwarmOptions() swarm.Options { - return provisioner.SwarmOptions -} - -func (provisioner *GenericProvisioner) SetOsReleaseInfo(info *OsRelease) { - provisioner.OsReleaseInfo = info -} - -func (provisioner *GenericProvisioner) GetOsReleaseInfo() (*OsRelease, error) { - return provisioner.OsReleaseInfo, nil -} - -func (provisioner *GenericProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - var ( - engineCfg bytes.Buffer - ) - - driverNameLabel := fmt.Sprintf("provider=%s", provisioner.Driver.DriverName()) - provisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel) - - engineConfigTmpl := ` -DOCKER_OPTS=' --H tcp://0.0.0.0:{{.DockerPort}} --H unix:///var/run/docker.sock ---storage-driver {{.EngineOptions.StorageDriver}} ---tlsverify ---tlscacert {{.AuthOptions.CaCertRemotePath}} ---tlscert {{.AuthOptions.ServerCertRemotePath}} ---tlskey {{.AuthOptions.ServerKeyRemotePath}} -{{ range .EngineOptions.Labels }}--label {{.}} -{{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} -{{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} -{{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} -{{ end }} -' -{{range .EngineOptions.Env}}export \"{{ printf "%q" . 
}}\" -{{end}} -` - t, err := template.New("engineConfig").Parse(engineConfigTmpl) - if err != nil { - return nil, err - } - - engineConfigContext := EngineConfigContext{ - DockerPort: dockerPort, - AuthOptions: provisioner.AuthOptions, - EngineOptions: provisioner.EngineOptions, - } - - t.Execute(&engineCfg, engineConfigContext) - - return &DockerOptions{ - EngineOptions: engineCfg.String(), - EngineOptionsPath: provisioner.DaemonOptionsFile, - }, nil -} - -func (provisioner *GenericProvisioner) GetDriver() drivers.Driver { - return provisioner.Driver -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/oraclelinux.go b/vendor/github.com/docker/machine/libmachine/provision/oraclelinux.go deleted file mode 100644 index 56d263741d2e..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/oraclelinux.go +++ /dev/null @@ -1,25 +0,0 @@ -package provision - -import ( - "github.com/docker/machine/libmachine/drivers" -) - -func init() { - Register("OracleLinux", &RegisteredProvisioner{ - New: NewOracleLinuxProvisioner, - }) -} - -func NewOracleLinuxProvisioner(d drivers.Driver) Provisioner { - return &OracleLinuxProvisioner{ - NewRedHatProvisioner("ol", d), - } -} - -type OracleLinuxProvisioner struct { - *RedHatProvisioner -} - -func (provisioner *OracleLinuxProvisioner) String() string { - return "ol" -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/os_release.go b/vendor/github.com/docker/machine/libmachine/provision/os_release.go deleted file mode 100644 index 4c3bdf2a1365..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/os_release.go +++ /dev/null @@ -1,91 +0,0 @@ -package provision - -import ( - "bufio" - "bytes" - "fmt" - "reflect" - "strings" - - "github.com/docker/machine/libmachine/log" -) - -// The /etc/os-release file contains operating system identification data -// See http://www.freedesktop.org/software/systemd/man/os-release.html for more details - -// OsRelease reflects values in /etc/os-release -// Values in this struct must always be string -// or the reflection will not work properly. 
-type OsRelease struct { - AnsiColor string `osr:"ANSI_COLOR"` - Name string `osr:"NAME"` - Version string `osr:"VERSION"` - Variant string `osr:"VARIANT"` - VariantID string `osr:"VARIANT_ID"` - ID string `osr:"ID"` - IDLike string `osr:"ID_LIKE"` - PrettyName string `osr:"PRETTY_NAME"` - VersionID string `osr:"VERSION_ID"` - HomeURL string `osr:"HOME_URL"` - SupportURL string `osr:"SUPPORT_URL"` - BugReportURL string `osr:"BUG_REPORT_URL"` -} - -func stripQuotes(val string) string { - if len(val) > 0 && val[0] == '"' { - return val[1 : len(val)-1] - } - return val -} - -func (osr *OsRelease) setIfPossible(key, val string) error { - v := reflect.ValueOf(osr).Elem() - for i := 0; i < v.NumField(); i++ { - fieldValue := v.Field(i) - fieldType := v.Type().Field(i) - originalName := fieldType.Tag.Get("osr") - if key == originalName && fieldValue.Kind() == reflect.String { - fieldValue.SetString(val) - return nil - } - } - return fmt.Errorf("Couldn't set key %s, no corresponding struct field found", key) -} - -func parseLine(osrLine string) (string, string, error) { - if osrLine == "" { - return "", "", nil - } - - vals := strings.Split(osrLine, "=") - if len(vals) != 2 { - return "", "", fmt.Errorf("Expected %s to split by '=' char into two strings, instead got %d strings", osrLine, len(vals)) - } - key := vals[0] - val := stripQuotes(vals[1]) - return key, val, nil -} - -func (osr *OsRelease) ParseOsRelease(osReleaseContents []byte) error { - r := bytes.NewReader(osReleaseContents) - scanner := bufio.NewScanner(r) - for scanner.Scan() { - key, val, err := parseLine(scanner.Text()) - if err != nil { - log.Warnf("Warning: got an invalid line error parsing /etc/os-release: %s", err) - continue - } - if err := osr.setIfPossible(key, val); err != nil { - log.Debug(err) - } - } - return nil -} - -func NewOsRelease(contents []byte) (*OsRelease, error) { - osr := &OsRelease{} - if err := osr.ParseOsRelease(contents); err != nil { - return nil, err - } - return osr, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/pkgaction/pkg_action.go b/vendor/github.com/docker/machine/libmachine/provision/pkgaction/pkg_action.go deleted file mode 100644 index b7cf7e3279ac..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/pkgaction/pkg_action.go +++ /dev/null @@ -1,25 +0,0 @@ -package pkgaction - -type PackageAction int - -const ( - Install PackageAction = iota - Remove - Upgrade - Purge -) - -var packageActions = []string{ - "install", - "remove", - "upgrade", - "purge", -} - -func (s PackageAction) String() string { - if int(s) >= 0 && int(s) < len(packageActions) { - return packageActions[s] - } - - return "" -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/provisioner.go b/vendor/github.com/docker/machine/libmachine/provision/provisioner.go deleted file mode 100644 index 81fc1ebedfb4..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/provisioner.go +++ /dev/null @@ -1,132 +0,0 @@ -package provision - -import ( - "fmt" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -var ( - provisioners = make(map[string]*RegisteredProvisioner) - detector Detector = &StandardDetector{} -) - -const ( - 
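os_release.go maps KEY=value pairs from /etc/os-release onto struct fields by matching a custom `osr` tag via reflection, which is why every field has to be a string. A reduced sketch of the same mechanism, keeping only two fields:

package main

import (
	"bufio"
	"fmt"
	"reflect"
	"strings"
)

// osRelease keeps just two fields; the tag carries the key as it appears in
// /etc/os-release.
type osRelease struct {
	ID        string `osr:"ID"`
	VersionID string `osr:"VERSION_ID"`
}

func stripQuotes(val string) string {
	if len(val) > 0 && val[0] == '"' {
		return val[1 : len(val)-1]
	}
	return val
}

// setIfPossible assigns val to the field whose osr tag matches key.
func (o *osRelease) setIfPossible(key, val string) {
	v := reflect.ValueOf(o).Elem()
	for i := 0; i < v.NumField(); i++ {
		if v.Type().Field(i).Tag.Get("osr") == key && v.Field(i).Kind() == reflect.String {
			v.Field(i).SetString(val)
			return
		}
	}
}

func parse(contents string) *osRelease {
	o := &osRelease{}
	scanner := bufio.NewScanner(strings.NewReader(contents))
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), "=", 2)
		if len(parts) != 2 {
			continue
		}
		o.setIfPossible(parts[0], stripQuotes(parts[1]))
	}
	return o
}

func main() {
	o := parse("ID=ubuntu\nVERSION_ID=\"16.04\"\n")
	fmt.Println(o.ID, o.VersionID)
}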
LastReleaseBeforeCEVersioning = "1.13.1" -) - -type SSHCommander interface { - // Short-hand for accessing an SSH command from the driver. - SSHCommand(args string) (string, error) -} - -type Detector interface { - DetectProvisioner(d drivers.Driver) (Provisioner, error) -} - -type StandardDetector struct{} - -func SetDetector(newDetector Detector) { - detector = newDetector -} - -// Provisioner defines distribution specific actions -type Provisioner interface { - fmt.Stringer - SSHCommander - - // Create the files for the daemon to consume configuration settings (return struct of content and path) - GenerateDockerOptions(dockerPort int) (*DockerOptions, error) - - // Get the directory where the settings files for docker are to be found - GetDockerOptionsDir() string - - // Return the auth options used to configure remote connection for the daemon. - GetAuthOptions() auth.Options - - // Get the swarm options associated with this host. - GetSwarmOptions() swarm.Options - - // Run a package action e.g. install - Package(name string, action pkgaction.PackageAction) error - - // Get Hostname - Hostname() (string, error) - - // Set hostname - SetHostname(hostname string) error - - // Figure out if this is the right provisioner to use based on /etc/os-release info - CompatibleWithHost() bool - - // Do the actual provisioning piece: - // 1. Set the hostname on the instance. - // 2. Install Docker if it is not present. - // 3. Configure the daemon to accept connections over TLS. - // 4. Copy the needed certificates to the server and local config dir. - // 5. Configure / activate swarm if applicable. - Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error - - // Perform action on a named service e.g. stop - Service(name string, action serviceaction.ServiceAction) error - - // Get the driver which is contained in the provisioner. 
- GetDriver() drivers.Driver - - // Set the OS Release info depending on how it's represented - // internally - SetOsReleaseInfo(info *OsRelease) - - // Get the OS Release info for the current provisioner - GetOsReleaseInfo() (*OsRelease, error) -} - -// RegisteredProvisioner creates a new provisioner -type RegisteredProvisioner struct { - New func(d drivers.Driver) Provisioner -} - -func Register(name string, p *RegisteredProvisioner) { - provisioners[name] = p -} - -func DetectProvisioner(d drivers.Driver) (Provisioner, error) { - return detector.DetectProvisioner(d) -} - -func (detector StandardDetector) DetectProvisioner(d drivers.Driver) (Provisioner, error) { - log.Info("Waiting for SSH to be available...") - if err := drivers.WaitForSSH(d); err != nil { - return nil, err - } - - log.Info("Detecting the provisioner...") - - osReleaseOut, err := drivers.RunSSHCommandFromDriver(d, "cat /etc/os-release") - if err != nil { - return nil, fmt.Errorf("Error getting SSH command: %s", err) - } - - osReleaseInfo, err := NewOsRelease([]byte(osReleaseOut)) - if err != nil { - return nil, fmt.Errorf("Error parsing /etc/os-release file: %s", err) - } - - for _, p := range provisioners { - provisioner := p.New(d) - provisioner.SetOsReleaseInfo(osReleaseInfo) - - if provisioner.CompatibleWithHost() { - log.Debugf("found compatible host: %s", osReleaseInfo.ID) - return provisioner, nil - } - } - - return nil, ErrDetectionFailed -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/rancheros.go b/vendor/github.com/docker/machine/libmachine/provision/rancheros.go deleted file mode 100644 index a980b2bb79a7..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/rancheros.go +++ /dev/null @@ -1,250 +0,0 @@ -package provision - -import ( - "bufio" - "fmt" - "net/http" - "strings" - - "github.com/docker/machine/commands/mcndirs" - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/state" - "github.com/docker/machine/libmachine/swarm" -) - -const ( - versionsURL = "http://releases.rancher.com/os/versions.yml" - isoURL = "https://github.com/rancherio/os/releases/download/%s/machine-rancheros.iso" - hostnameTmpl = `sudo mkdir -p /var/lib/rancher/conf/cloud-config.d/ -sudo tee /var/lib/rancher/conf/cloud-config.d/machine-hostname.yml << EOF -#cloud-config - -hostname: %s -EOF -` -) - -func init() { - Register("RancherOS", &RegisteredProvisioner{ - New: NewRancherProvisioner, - }) -} - -func NewRancherProvisioner(d drivers.Driver) Provisioner { - return &RancherProvisioner{ - GenericProvisioner{ - SSHCommander: GenericSSHCommander{Driver: d}, - DockerOptionsDir: "/var/lib/rancher/conf", - DaemonOptionsFile: "/var/lib/rancher/conf/docker", - OsReleaseID: "rancheros", - Driver: d, - }, - } -} - -type RancherProvisioner struct { - GenericProvisioner -} - -func (provisioner *RancherProvisioner) String() string { - return "rancheros" -} - -func (provisioner *RancherProvisioner) Service(name string, action serviceaction.ServiceAction) error { - command := fmt.Sprintf("sudo system-docker %s %s", action.String(), name) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func (provisioner 
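provisioner.go keeps a package-level map of constructors that each distro file fills from its init(), and detection instantiates every registered entry and asks whether it is compatible with the host's os-release data. A compact sketch of that registry-and-probe pattern, using hypothetical names and a plain string in place of the full OsRelease struct:

package main

import (
	"errors"
	"fmt"
)

// provisioner is cut down to just what detection needs.
type provisioner interface {
	String() string
	CompatibleWithHost(osReleaseID string) bool
}

var registry = map[string]func() provisioner{}

func register(name string, newFn func() provisioner) {
	registry[name] = newFn
}

type ubuntu struct{}

func (ubuntu) String() string                    { return "ubuntu" }
func (ubuntu) CompatibleWithHost(id string) bool { return id == "ubuntu" }

func init() { register("Ubuntu", func() provisioner { return ubuntu{} }) }

var errDetectionFailed = errors.New("no compatible provisioner found")

// detect walks the registry and returns the first provisioner that claims the host.
func detect(osReleaseID string) (provisioner, error) {
	for _, newFn := range registry {
		p := newFn()
		if p.CompatibleWithHost(osReleaseID) {
			return p, nil
		}
	}
	return nil, errDetectionFailed
}

func main() {
	p, err := detect("ubuntu")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("detected:", p)
}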
*RancherProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - if name == "docker" && action == pkgaction.Upgrade { - return provisioner.upgrade() - } - - switch action { - case pkgaction.Install: - packageAction = "enabled" - case pkgaction.Remove: - packageAction = "disable" - case pkgaction.Upgrade: - // TODO: support upgrade - packageAction = "upgrade" - } - - command := fmt.Sprintf("sudo rancherctl service %s %s", packageAction, name) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func (provisioner *RancherProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - log.Debugf("Running RancherOS provisioner on %s", provisioner.Driver.GetMachineName()) - - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - if provisioner.EngineOptions.StorageDriver == "" { - provisioner.EngineOptions.StorageDriver = "overlay" - } else if provisioner.EngineOptions.StorageDriver != "overlay" { - return fmt.Errorf("Unsupported storage driver: %s", provisioner.EngineOptions.StorageDriver) - } - - log.Debugf("Setting hostname %s", provisioner.Driver.GetMachineName()) - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - for _, pkg := range provisioner.Packages { - log.Debugf("Installing package %s", pkg) - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - if engineOptions.InstallURL == drivers.DefaultEngineInstallURL { - log.Debugf("Skipping docker engine default: %s", engineOptions.InstallURL) - } else { - log.Debugf("Selecting docker engine: %s", engineOptions.InstallURL) - if err := selectDocker(provisioner, engineOptions.InstallURL); err != nil { - return err - } - } - - log.Debugf("Preparing certificates") - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debugf("Setting up certificates") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debugf("Configuring swarm") - err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions) - return err -} - -func (provisioner *RancherProvisioner) SetHostname(hostname string) error { - // /etc/hosts is bind mounted from Docker, this is hack to that the generic provisioner doesn't try to mv /etc/hosts - if _, err := provisioner.SSHCommand("sed /127.0.1.1/d /etc/hosts > /tmp/hosts && cat /tmp/hosts | sudo tee /etc/hosts"); err != nil { - return err - } - - if err := provisioner.GenericProvisioner.SetHostname(hostname); err != nil { - return err - } - - if _, err := provisioner.SSHCommand(fmt.Sprintf(hostnameTmpl, hostname)); err != nil { - return err - } - - return nil -} - -func (provisioner *RancherProvisioner) upgrade() error { - switch provisioner.Driver.DriverName() { - case "virtualbox": - return provisioner.upgradeIso() - default: - log.Infof("Running upgrade") - if _, err := provisioner.SSHCommand("sudo rancherctl os upgrade -f --no-reboot"); err != nil { - return err - } - - log.Infof("Upgrade succeeded, rebooting") - // ignore errors here because the SSH connection will close - provisioner.SSHCommand("sudo reboot") - - return nil - } -} - -func (provisioner *RancherProvisioner) upgradeIso() error { - // Largely copied from Boot2Docker provisioner, we should find a way to share this code - log.Info("Stopping machine to do the 
upgrade...") - - if err := provisioner.Driver.Stop(); err != nil { - return err - } - - if err := mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil { - return err - } - - machineName := provisioner.GetDriver().GetMachineName() - - log.Infof("Upgrading machine %s...", machineName) - - // TODO: Ideally, we should not read from mcndirs directory at all. - // The driver should be able to communicate how and where to place the - // relevant files. - b2dutils := mcnutils.NewB2dUtils(mcndirs.GetBaseDir()) - - url, err := provisioner.getLatestISOURL() - if err != nil { - return err - } - - if err := b2dutils.DownloadISOFromURL(url); err != nil { - return err - } - - // Copy the latest version of boot2docker ISO to the machine's directory - if err := b2dutils.CopyIsoToMachineDir("", machineName); err != nil { - return err - } - - log.Infof("Starting machine back up...") - - if err := provisioner.Driver.Start(); err != nil { - return err - } - - return mcnutils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running)) -} - -func (provisioner *RancherProvisioner) getLatestISOURL() (string, error) { - log.Debugf("Reading %s", versionsURL) - resp, err := http.Get(versionsURL) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // Don't want to pull in yaml parser, we'll do this manually - scanner := bufio.NewScanner(resp.Body) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "current: ") { - log.Debugf("Found %s", line) - return fmt.Sprintf(isoURL, strings.Split(line, ":")[2]), err - } - } - - return "", fmt.Errorf("Failed to find current version") -} - -func selectDocker(p Provisioner, baseURL string) error { - // TODO: detect if its a cloud-init, or a ros setting - and use that.. - if output, err := p.SSHCommand(fmt.Sprintf("wget -O- %s | sh -", baseURL)); err != nil { - return fmt.Errorf("error selecting docker: (%s) %s", err, output) - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/redhat.go b/vendor/github.com/docker/machine/libmachine/provision/redhat.go deleted file mode 100644 index 385bfde4f16d..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/redhat.go +++ /dev/null @@ -1,212 +0,0 @@ -package provision - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "text/template" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -var ( - ErrUnknownYumOsRelease = errors.New("unknown OS for Yum repository") - engineConfigTemplate = `[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --storage-driver {{.EngineOptions.StorageDriver}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }} -Environment={{range .EngineOptions.Env}}{{ printf "%q" . 
}} {{end}} -` - majorVersionRE = regexp.MustCompile(`^(\d+)(\..*)?`) -) - -type PackageListInfo struct { - OsRelease string - OsReleaseVersion string -} - -func init() { - Register("RedHat", &RegisteredProvisioner{ - New: func(d drivers.Driver) Provisioner { - return NewRedHatProvisioner("rhel", d) - }, - }) -} - -func NewRedHatProvisioner(osReleaseID string, d drivers.Driver) *RedHatProvisioner { - systemdProvisioner := NewSystemdProvisioner(osReleaseID, d) - systemdProvisioner.SSHCommander = RedHatSSHCommander{Driver: d} - return &RedHatProvisioner{ - systemdProvisioner, - } -} - -type RedHatProvisioner struct { - SystemdProvisioner -} - -func (provisioner *RedHatProvisioner) String() string { - return "redhat" -} - -func (provisioner *RedHatProvisioner) SetHostname(hostname string) error { - // we have to have SetHostname here as well to use the RedHat provisioner - // SSHCommand to add the tty allocation - if _, err := provisioner.SSHCommand(fmt.Sprintf( - "sudo hostname %s && echo %q | sudo tee /etc/hostname", - hostname, - hostname, - )); err != nil { - return err - } - - if _, err := provisioner.SSHCommand(fmt.Sprintf( - "if grep -xq 127.0.1.1.* /etc/hosts; then sudo sed -i 's/^127.0.1.1.*/127.0.1.1 %s/g' /etc/hosts; else echo '127.0.1.1 %s' | sudo tee -a /etc/hosts; fi", - hostname, - hostname, - )); err != nil { - return err - } - - return nil -} - -func (provisioner *RedHatProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - switch action { - case pkgaction.Install: - packageAction = "install" - case pkgaction.Remove: - packageAction = "remove" - case pkgaction.Purge: - packageAction = "remove" - case pkgaction.Upgrade: - packageAction = "upgrade" - } - - command := fmt.Sprintf("sudo -E yum %s -y %s", packageAction, name) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func installDocker(provisioner *RedHatProvisioner) error { - if err := installDockerGeneric(provisioner, provisioner.EngineOptions.InstallURL); err != nil { - return err - } - - if err := provisioner.Service("docker", serviceaction.Restart); err != nil { - return err - } - - err := provisioner.Service("docker", serviceaction.Enable) - return err -} - -func (provisioner *RedHatProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. 
- return true -} - -func (provisioner *RedHatProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - // set default storage driver for redhat - storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - for _, pkg := range provisioner.Packages { - log.Debugf("installing base package: name=%s", pkg) - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - // update OS -- this is needed for libdevicemapper and the docker install - if _, err := provisioner.SSHCommand("sudo -E yum -y update -x docker-*"); err != nil { - return err - } - - // install docker - if err := installDocker(provisioner); err != nil { - return err - } - - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - if err := makeDockerOptionsDir(provisioner); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - err = configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions) - return err -} - -func (provisioner *RedHatProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - var ( - engineCfg bytes.Buffer - configPath = provisioner.DaemonOptionsFile - ) - - driverNameLabel := fmt.Sprintf("provider=%s", provisioner.Driver.DriverName()) - provisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel) - - // systemd / redhat will not load options if they are on newlines - // instead, it just continues with a different set of options; yeah... - t, err := template.New("engineConfig").Parse(engineConfigTemplate) - if err != nil { - return nil, err - } - - engineConfigContext := EngineConfigContext{ - DockerPort: dockerPort, - AuthOptions: provisioner.AuthOptions, - EngineOptions: provisioner.EngineOptions, - DockerOptionsDir: provisioner.DockerOptionsDir, - } - - t.Execute(&engineCfg, engineConfigContext) - - daemonOptsDir := configPath - return &DockerOptions{ - EngineOptions: engineCfg.String(), - EngineOptionsPath: daemonOptsDir, - }, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/redhat_ssh_commander.go b/vendor/github.com/docker/machine/libmachine/provision/redhat_ssh_commander.go deleted file mode 100644 index b94e27ba2d0f..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/redhat_ssh_commander.go +++ /dev/null @@ -1,45 +0,0 @@ -package provision - -import ( - "fmt" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/ssh" -) - -type RedHatSSHCommander struct { - Driver drivers.Driver -} - -func (sshCmder RedHatSSHCommander) SSHCommand(args string) (string, error) { - client, err := drivers.GetSSHClientFromDriver(sshCmder.Driver) - if err != nil { - return "", err - } - - log.Debugf("About to run SSH command:\n%s", args) - - // redhat needs "-t" for tty allocation on ssh therefore we check for the - // external client and add as needed. 
- // Note: CentOS 7.0 needs multiple "-tt" to force tty allocation when ssh has - // no local tty. - var output string - switch c := client.(type) { - case *ssh.ExternalClient: - c.BaseArgs = append(c.BaseArgs, "-tt") - output, err = c.Output(args) - case *ssh.NativeClient: - output, err = c.OutputWithPty(args) - } - - log.Debugf("SSH cmd err, output: %v: %s", err, output) - if err != nil { - return "", fmt.Errorf(`something went wrong running an SSH command -command : %s -err : %v -output : %s`, args, err, output) - } - - return output, nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/serviceaction/service_action.go b/vendor/github.com/docker/machine/libmachine/provision/serviceaction/service_action.go deleted file mode 100644 index b2e22e5936bd..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/serviceaction/service_action.go +++ /dev/null @@ -1,29 +0,0 @@ -package serviceaction - -type ServiceAction int - -const ( - Restart ServiceAction = iota - Start - Stop - Enable - Disable - DaemonReload -) - -var serviceActions = []string{ - "restart", - "start", - "stop", - "enable", - "disable", - "daemon-reload", -} - -func (s ServiceAction) String() string { - if int(s) >= 0 && int(s) < len(serviceActions) { - return serviceActions[s] - } - - return "" -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/suse.go b/vendor/github.com/docker/machine/libmachine/provision/suse.go deleted file mode 100644 index 647a4e049478..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/suse.go +++ /dev/null @@ -1,195 +0,0 @@ -package provision - -import ( - "fmt" - "strings" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("SUSE", &RegisteredProvisioner{ - New: NewSUSEProvisioner, - }) -} - -func NewSUSEProvisioner(d drivers.Driver) Provisioner { - return &SUSEProvisioner{ - NewSystemdProvisioner("SUSE", d), - } -} - -type SUSEProvisioner struct { - SystemdProvisioner -} - -func (provisioner *SUSEProvisioner) CompatibleWithHost() bool { - ids := strings.Split(provisioner.OsReleaseInfo.IDLike, " ") - for _, id := range ids { - if id == "suse" { - return true - } - } - return false -} - -func (provisioner *SUSEProvisioner) String() string { - return "SUSE" -} - -func (provisioner *SUSEProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - switch action { - case pkgaction.Install: - packageAction = "in" - // This is an optimization that reduces the provisioning time of certain - // systems in a significant way. - // The invocation of "zypper in " causes the download of the metadata - // of all the repositories that have never been refreshed or that have - // automatic refresh toggled and have not been refreshed recently. - // Refreshing the repository metadata can take quite some time and can cause - // longer provisioning times for machines that have been pre-optimized for - // docker by including all the needed packages. 
- if _, err := provisioner.SSHCommand(fmt.Sprintf("rpm -q %s", name)); err == nil { - log.Debugf("%s is already installed, skipping operation", name) - return nil - } - case pkgaction.Remove: - packageAction = "rm" - case pkgaction.Upgrade: - packageAction = "up" - } - - command := fmt.Sprintf("sudo -E zypper -n %s %s", packageAction, name) - - log.Debugf("zypper: action=%s name=%s", action.String(), name) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func (provisioner *SUSEProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. - return true -} - -func (provisioner *SUSEProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - // figure out the filesystem used by /var/lib/docker - fs, err := provisioner.SSHCommand("stat -f -c %T /var/lib/docker") - if err != nil { - // figure out the filesystem used by /var/lib - fs, err = provisioner.SSHCommand("stat -f -c %T /var/lib/") - if err != nil { - return err - } - } - graphDriver := "overlay" - if strings.Contains(fs, "btrfs") { - graphDriver = "btrfs" - } - - storageDriver, err := decideStorageDriver(provisioner, graphDriver, engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - log.Debug("Setting hostname") - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - if !strings.HasPrefix(strings.ToLower(provisioner.OsReleaseInfo.ID), "opensuse") { - // This is a SLE machine, enable the containers module to have access - // to the docker packages - if _, err := provisioner.SSHCommand("sudo -E SUSEConnect -p sle-module-containers/12/$(uname -m) -r ''"); err != nil { - return fmt.Errorf( - "Error while adding the 'containers' module, make sure this machine is registered either against SUSE Customer Center (SCC) or to a local Subscription Management Tool (SMT): %v", - err) - } - } - - log.Debug("Installing base packages") - for _, pkg := range provisioner.Packages { - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - log.Debug("Installing docker") - if err := provisioner.Package("docker", pkgaction.Install); err != nil { - return err - } - - // create symlinks for containerd, containerd-shim and optional runc. - // We have to do that because machine overrides the openSUSE systemd - // unit of docker - if _, err := provisioner.SSHCommand("yes no | sudo -E ln -si /usr/sbin/runc /usr/sbin/docker-runc"); err != nil { - return err - } - if _, err := provisioner.SSHCommand("sudo -E ln -sf /usr/sbin/containerd /usr/sbin/docker-containerd"); err != nil { - return err - } - if _, err := provisioner.SSHCommand("sudo -E ln -sf /usr/sbin/containerd-shim /usr/sbin/docker-containerd-shim"); err != nil { - return err - } - - // Is yast2 firewall installed? 
- if _, installed := provisioner.SSHCommand("rpm -q yast2-firewall"); installed == nil { - // Open the firewall port required by docker - if _, err := provisioner.SSHCommand("sudo -E /sbin/yast2 firewall services add ipprotocol=tcp tcpport=2376 zone=EXT"); err != nil { - return err - } - } - - log.Debug("Starting systemd docker service") - if err := provisioner.Service("docker", serviceaction.Start); err != nil { - return err - } - - log.Debug("Waiting for docker daemon") - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debug("Configuring auth") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debug("Configuring swarm") - if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { - return err - } - - // enable in systemd - log.Debug("Enabling docker in systemd") - err = provisioner.Service("docker", serviceaction.Enable) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/systemd.go b/vendor/github.com/docker/machine/libmachine/provision/systemd.go deleted file mode 100644 index 90d026032386..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/systemd.go +++ /dev/null @@ -1,101 +0,0 @@ -package provision - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/versioncmp" -) - -type SystemdProvisioner struct { - GenericProvisioner -} - -func (p *SystemdProvisioner) String() string { - return "redhat" -} - -func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) SystemdProvisioner { - return SystemdProvisioner{ - GenericProvisioner{ - SSHCommander: GenericSSHCommander{Driver: d}, - DockerOptionsDir: "/etc/docker", - DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", - OsReleaseID: osReleaseID, - Packages: []string{ - "curl", - }, - Driver: d, - }, - } -} - -func (p *SystemdProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) { - var ( - engineCfg bytes.Buffer - ) - - driverNameLabel := fmt.Sprintf("provider=%s", p.Driver.DriverName()) - p.EngineOptions.Labels = append(p.EngineOptions.Labels, driverNameLabel) - - dockerVersion, err := DockerClientVersion(p) - if err != nil { - return nil, err - } - - arg := "dockerd" - if versioncmp.LessThan(dockerVersion, "1.12.0") { - arg = "docker daemon" - } - - engineConfigTmpl := `[Service] -ExecStart= -ExecStart=/usr/bin/` + arg + ` -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --storage-driver {{.EngineOptions.StorageDriver}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }} -Environment={{range .EngineOptions.Env}}{{ printf "%q" . 
}} {{end}} -` - t, err := template.New("engineConfig").Parse(engineConfigTmpl) - if err != nil { - return nil, err - } - - engineConfigContext := EngineConfigContext{ - DockerPort: dockerPort, - AuthOptions: p.AuthOptions, - EngineOptions: p.EngineOptions, - } - - t.Execute(&engineCfg, engineConfigContext) - - return &DockerOptions{ - EngineOptions: engineCfg.String(), - EngineOptionsPath: p.DaemonOptionsFile, - }, nil -} - -func (p *SystemdProvisioner) Service(name string, action serviceaction.ServiceAction) error { - reloadDaemon := false - switch action { - case serviceaction.Start, serviceaction.Restart: - reloadDaemon = true - } - - // systemd needs reloaded when config changes on disk; we cannot - // be sure exactly when it changes from the provisioner so - // we call a reload on every restart to be safe - if reloadDaemon { - if _, err := p.SSHCommand("sudo systemctl daemon-reload"); err != nil { - return err - } - } - - command := fmt.Sprintf("sudo systemctl -f %s %s", action.String(), name) - - if _, err := p.SSHCommand(command); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/ubuntu_systemd.go b/vendor/github.com/docker/machine/libmachine/provision/ubuntu_systemd.go deleted file mode 100644 index 9cf16f9e5761..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/ubuntu_systemd.go +++ /dev/null @@ -1,149 +0,0 @@ -package provision - -import ( - "fmt" - "strconv" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("Ubuntu-SystemD", &RegisteredProvisioner{ - New: NewUbuntuSystemdProvisioner, - }) -} - -func NewUbuntuSystemdProvisioner(d drivers.Driver) Provisioner { - return &UbuntuSystemdProvisioner{ - NewSystemdProvisioner("ubuntu", d), - } -} - -type UbuntuSystemdProvisioner struct { - SystemdProvisioner -} - -func (provisioner *UbuntuSystemdProvisioner) String() string { - return "ubuntu(systemd)" -} - -func (provisioner *UbuntuSystemdProvisioner) CompatibleWithHost() bool { - const FirstUbuntuSystemdVersion = 15.04 - - isUbuntu := provisioner.OsReleaseInfo.ID == provisioner.OsReleaseID - if !isUbuntu { - return false - } - versionNumber, err := strconv.ParseFloat(provisioner.OsReleaseInfo.VersionID, 64) - if err != nil { - return false - } - return versionNumber >= FirstUbuntuSystemdVersion - -} - -func (provisioner *UbuntuSystemdProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - updateMetadata := true - - switch action { - case pkgaction.Install, pkgaction.Upgrade: - packageAction = "install" - case pkgaction.Remove: - packageAction = "remove" - updateMetadata = false - case pkgaction.Purge: - packageAction = "purge" - updateMetadata = false - } - - switch name { - case "docker": - name = "docker-ce" - } - - if updateMetadata { - if err := waitForLockAptGetUpdate(provisioner); err != nil { - return err - } - } - - command := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y %s", packageAction, name) - - log.Debugf("package: action=%s name=%s", action.String(), name) - - return waitForLock(provisioner, command) -} - -func (provisioner 
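SystemdProvisioner.Service above reloads the systemd manager before any start or restart because the unit drop-in it writes may have changed on disk since the last reload. A sketch of that command sequence against the commander interface, with actions passed as plain strings instead of the serviceaction enum:

package main

import "fmt"

type commander interface {
	SSHCommand(args string) (string, error)
}

type echoCommander struct{}

func (echoCommander) SSHCommand(args string) (string, error) {
	fmt.Println("ssh:", args)
	return "", nil
}

// service runs "systemctl daemon-reload" before start/restart so edits to the
// docker unit drop-in are picked up, then issues the requested action.
func service(c commander, name, action string) error {
	if action == "start" || action == "restart" {
		if _, err := c.SSHCommand("sudo systemctl daemon-reload"); err != nil {
			return err
		}
	}
	_, err := c.SSHCommand(fmt.Sprintf("sudo systemctl -f %s %s", action, name))
	return err
}

func main() {
	_ = service(echoCommander{}, "docker", "restart")
}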
*UbuntuSystemdProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. - return true -} - -func (provisioner *UbuntuSystemdProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - log.Debug("setting hostname") - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - log.Debug("installing base packages") - for _, pkg := range provisioner.Packages { - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - log.Info("Installing Docker...") - if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { - return err - } - - log.Debug("waiting for docker daemon") - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - log.Debug("configuring auth") - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - log.Debug("configuring swarm") - if err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil { - return err - } - - // enable in systemd - log.Debug("enabling docker in systemd") - err = provisioner.Service("docker", serviceaction.Enable) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/ubuntu_upstart.go b/vendor/github.com/docker/machine/libmachine/provision/ubuntu_upstart.go deleted file mode 100644 index 7f62ab232864..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/ubuntu_upstart.go +++ /dev/null @@ -1,158 +0,0 @@ -package provision - -import ( - "fmt" - "strconv" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" - "github.com/docker/machine/libmachine/swarm" -) - -func init() { - Register("Ubuntu-UpStart", &RegisteredProvisioner{ - New: NewUbuntuProvisioner, - }) -} - -func NewUbuntuProvisioner(d drivers.Driver) Provisioner { - return &UbuntuProvisioner{ - GenericProvisioner{ - SSHCommander: GenericSSHCommander{Driver: d}, - DockerOptionsDir: "/etc/docker", - DaemonOptionsFile: "/etc/default/docker", - OsReleaseID: "ubuntu", - Packages: []string{ - "curl", - }, - Driver: d, - }, - } -} - -type UbuntuProvisioner struct { - GenericProvisioner -} - -func (provisioner *UbuntuProvisioner) String() string { - return "ubuntu(upstart)" -} - -func (provisioner *UbuntuProvisioner) CompatibleWithHost() bool { - const FirstUbuntuSystemdVersion = 15.04 - isUbuntu := provisioner.OsReleaseInfo.ID == provisioner.OsReleaseID - if !isUbuntu { - return false - } 
- versionNumber, err := strconv.ParseFloat(provisioner.OsReleaseInfo.VersionID, 64) - if err != nil { - return false - } - - return versionNumber < FirstUbuntuSystemdVersion - -} - -func (provisioner *UbuntuProvisioner) Service(name string, action serviceaction.ServiceAction) error { - command := fmt.Sprintf("sudo service %s %s", name, action.String()) - - if _, err := provisioner.SSHCommand(command); err != nil { - return err - } - - return nil -} - -func (provisioner *UbuntuProvisioner) Package(name string, action pkgaction.PackageAction) error { - var packageAction string - - updateMetadata := true - - switch action { - case pkgaction.Install, pkgaction.Upgrade: - packageAction = "install" - case pkgaction.Remove, pkgaction.Purge: - packageAction = "remove" - updateMetadata = false - } - - switch name { - case "docker": - name = "docker-engine" - } - - if updateMetadata { - if err := waitForLockAptGetUpdate(provisioner); err != nil { - return err - } - } - - command := fmt.Sprintf("DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y -o Dpkg::Options::=\"--force-confnew\" %s", packageAction, name) - - log.Debugf("package: action=%s name=%s", action.String(), name) - - return waitForLock(provisioner, command) -} - -func (provisioner *UbuntuProvisioner) dockerDaemonResponding() bool { - log.Debug("checking docker daemon") - - if out, err := provisioner.SSHCommand("sudo docker version"); err != nil { - log.Warnf("Error getting SSH command to check if the daemon is up: %s", err) - log.Debugf("'sudo docker version' output:\n%s", out) - return false - } - - // The daemon is up if the command worked. Carry on. - return true -} - -func (provisioner *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error { - provisioner.SwarmOptions = swarmOptions - provisioner.AuthOptions = authOptions - provisioner.EngineOptions = engineOptions - swarmOptions.Env = engineOptions.Env - - storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver) - if err != nil { - return err - } - provisioner.EngineOptions.StorageDriver = storageDriver - - if err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil { - return err - } - - for _, pkg := range provisioner.Packages { - if err := provisioner.Package(pkg, pkgaction.Install); err != nil { - return err - } - } - - log.Info("Installing Docker...") - if err := installDockerGeneric(provisioner, engineOptions.InstallURL); err != nil { - return err - } - - if err := mcnutils.WaitFor(provisioner.dockerDaemonResponding); err != nil { - return err - } - - if err := makeDockerOptionsDir(provisioner); err != nil { - return err - } - - provisioner.AuthOptions = setRemoteAuthOptions(provisioner) - - if err := ConfigureAuth(provisioner); err != nil { - return err - } - - err = configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions) - return err -} diff --git a/vendor/github.com/docker/machine/libmachine/provision/utils.go b/vendor/github.com/docker/machine/libmachine/provision/utils.go deleted file mode 100644 index e91ed85666c5..000000000000 --- a/vendor/github.com/docker/machine/libmachine/provision/utils.go +++ /dev/null @@ -1,322 +0,0 @@ -package provision - -import ( - "fmt" - "io/ioutil" - "net/url" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - "github.com/docker/machine/libmachine/auth" - "github.com/docker/machine/libmachine/cert" - "github.com/docker/machine/libmachine/engine" - 
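Both Ubuntu provisioners pick between upstart and systemd by parsing VERSION_ID as a float and comparing it against the 15.04 cutoff named in the deleted constant. A standalone sketch of that gate; the float parse is only reasonable because Ubuntu VERSION_ID values are short numeric strings such as "14.04" or "16.04":

package main

import (
	"fmt"
	"strconv"
)

const firstUbuntuSystemdVersion = 15.04

// usesSystemd reports whether an Ubuntu VERSION_ID belongs to a systemd-based release.
func usesSystemd(versionID string) (bool, error) {
	v, err := strconv.ParseFloat(versionID, 64)
	if err != nil {
		return false, fmt.Errorf("unparseable VERSION_ID %q: %v", versionID, err)
	}
	return v >= firstUbuntuSystemdVersion, nil
}

func main() {
	for _, id := range []string{"14.04", "16.04"} {
		ok, err := usesSystemd(id)
		fmt.Println(id, ok, err)
	}
}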
"github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "github.com/docker/machine/libmachine/provision/serviceaction" -) - -type DockerOptions struct { - EngineOptions string - EngineOptionsPath string -} - -func installDockerGeneric(p Provisioner, baseURL string) error { - // install docker - until cloudinit we use ubuntu everywhere so we - // just install it using the docker repos - if output, err := p.SSHCommand(fmt.Sprintf("if ! type docker; then curl -sSL %s | sh -; fi", baseURL)); err != nil { - return fmt.Errorf("error installing docker: %s", output) - } - - return nil -} - -func makeDockerOptionsDir(p Provisioner) error { - dockerDir := p.GetDockerOptionsDir() - if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s", dockerDir)); err != nil { - return err - } - - return nil -} - -func setRemoteAuthOptions(p Provisioner) auth.Options { - dockerDir := p.GetDockerOptionsDir() - authOptions := p.GetAuthOptions() - - // due to windows clients, we cannot use filepath.Join as the paths - // will be mucked on the linux hosts - authOptions.CaCertRemotePath = path.Join(dockerDir, "ca.pem") - authOptions.ServerCertRemotePath = path.Join(dockerDir, "server.pem") - authOptions.ServerKeyRemotePath = path.Join(dockerDir, "server-key.pem") - - return authOptions -} - -func ConfigureAuth(p Provisioner) error { - var ( - err error - ) - - driver := p.GetDriver() - machineName := driver.GetMachineName() - authOptions := p.GetAuthOptions() - swarmOptions := p.GetSwarmOptions() - org := mcnutils.GetUsername() + "." + machineName - bits := 2048 - - ip, err := driver.GetIP() - if err != nil { - return err - } - - log.Info("Copying certs to the local machine directory...") - - if err := mcnutils.CopyFile(authOptions.CaCertPath, filepath.Join(authOptions.StorePath, "ca.pem")); err != nil { - return fmt.Errorf("Copying ca.pem to machine dir failed: %s", err) - } - - if err := mcnutils.CopyFile(authOptions.ClientCertPath, filepath.Join(authOptions.StorePath, "cert.pem")); err != nil { - return fmt.Errorf("Copying cert.pem to machine dir failed: %s", err) - } - - if err := mcnutils.CopyFile(authOptions.ClientKeyPath, filepath.Join(authOptions.StorePath, "key.pem")); err != nil { - return fmt.Errorf("Copying key.pem to machine dir failed: %s", err) - } - - // The Host IP is always added to the certificate's SANs list - hosts := append(authOptions.ServerCertSANs, ip, "localhost") - log.Debugf("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", - authOptions.ServerCertPath, - authOptions.CaCertPath, - authOptions.CaPrivateKeyPath, - org, - hosts, - ) - - // TODO: Switch to passing just authOptions to this func - // instead of all these individual fields - err = cert.GenerateCert(&cert.Options{ - Hosts: hosts, - CertFile: authOptions.ServerCertPath, - KeyFile: authOptions.ServerKeyPath, - CAFile: authOptions.CaCertPath, - CAKeyFile: authOptions.CaPrivateKeyPath, - Org: org, - Bits: bits, - SwarmMaster: swarmOptions.Master, - }) - - if err != nil { - return fmt.Errorf("error generating server cert: %s", err) - } - - if err := p.Service("docker", serviceaction.Stop); err != nil { - return err - } - - if _, err := p.SSHCommand(`if [ ! 
-z "$(ip link show docker0)" ]; then sudo ip link delete docker0; fi`); err != nil { - return err - } - - // upload certs and configure TLS auth - caCert, err := ioutil.ReadFile(authOptions.CaCertPath) - if err != nil { - return err - } - - serverCert, err := ioutil.ReadFile(authOptions.ServerCertPath) - if err != nil { - return err - } - serverKey, err := ioutil.ReadFile(authOptions.ServerKeyPath) - if err != nil { - return err - } - - log.Info("Copying certs to the remote machine...") - - // printf will choke if we don't pass a format string because of the - // dashes, so that's the reason for the '%%s' - certTransferCmdFmt := "printf '%%s' '%s' | sudo tee %s" - - // These ones are for Jessie and Mike <3 <3 <3 - if _, err := p.SSHCommand(fmt.Sprintf(certTransferCmdFmt, string(caCert), authOptions.CaCertRemotePath)); err != nil { - return err - } - - if _, err := p.SSHCommand(fmt.Sprintf(certTransferCmdFmt, string(serverCert), authOptions.ServerCertRemotePath)); err != nil { - return err - } - - if _, err := p.SSHCommand(fmt.Sprintf(certTransferCmdFmt, string(serverKey), authOptions.ServerKeyRemotePath)); err != nil { - return err - } - - dockerURL, err := driver.GetURL() - if err != nil { - return err - } - u, err := url.Parse(dockerURL) - if err != nil { - return err - } - dockerPort := engine.DefaultPort - parts := strings.Split(u.Host, ":") - if len(parts) == 2 { - dPort, err := strconv.Atoi(parts[1]) - if err != nil { - return err - } - dockerPort = dPort - } - - dkrcfg, err := p.GenerateDockerOptions(dockerPort) - if err != nil { - return err - } - - log.Info("Setting Docker configuration on the remote daemon...") - - if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(dkrcfg.EngineOptionsPath), dkrcfg.EngineOptions, dkrcfg.EngineOptionsPath)); err != nil { - return err - } - - if err := p.Service("docker", serviceaction.Start); err != nil { - return err - } - - return WaitForDocker(p, dockerPort) -} - -func matchNetstatOut(reDaemonListening, netstatOut string) bool { - // TODO: I would really prefer this be a Scanner directly on - // the STDOUT of the executed command than to do all the string - // manipulation hokey-pokey. - // - // TODO: Unit test this matching. 
- for _, line := range strings.Split(netstatOut, "\n") { - match, err := regexp.MatchString(reDaemonListening, line) - if err != nil { - log.Warnf("Regex warning: %s", err) - } - if match && line != "" { - return true - } - } - - return false -} - -func decideStorageDriver(p Provisioner, defaultDriver, suppliedDriver string) (string, error) { - if suppliedDriver != "" { - return suppliedDriver, nil - } - bestSuitedDriver := "" - - defer func() { - if bestSuitedDriver != "" { - log.Debugf("No storagedriver specified, using %s\n", bestSuitedDriver) - } - }() - - if defaultDriver != "aufs" { - bestSuitedDriver = defaultDriver - } else { - remoteFilesystemType, err := getFilesystemType(p, "/var/lib") - if err != nil { - return "", err - } - if remoteFilesystemType == "btrfs" { - bestSuitedDriver = "btrfs" - } else { - bestSuitedDriver = defaultDriver - } - } - return bestSuitedDriver, nil - -} - -func getFilesystemType(p Provisioner, directory string) (string, error) { - statCommandOutput, err := p.SSHCommand("stat -f -c %T " + directory) - if err != nil { - err = fmt.Errorf("Error looking up filesystem type: %s", err) - return "", err - } - - fstype := strings.TrimSpace(statCommandOutput) - return fstype, nil -} - -func checkDaemonUp(p Provisioner, dockerPort int) func() bool { - reDaemonListening := fmt.Sprintf(":%d\\s+.*:.*", dockerPort) - return func() bool { - // HACK: Check netstat's output to see if anyone's listening on the Docker API port. - netstatOut, err := p.SSHCommand("if ! type netstat 1>/dev/null; then ss -tln; else netstat -tln; fi") - if err != nil { - log.Warnf("Error running SSH command: %s", err) - return false - } - - return matchNetstatOut(reDaemonListening, netstatOut) - } -} - -func WaitForDocker(p Provisioner, dockerPort int) error { - if err := mcnutils.WaitForSpecific(checkDaemonUp(p, dockerPort), 10, 3*time.Second); err != nil { - return NewErrDaemonAvailable(err) - } - - return nil -} - -// DockerClientVersion returns the version of the Docker client on the host -// that ssh is connected to, e.g. "1.12.1". -func DockerClientVersion(ssh SSHCommander) (string, error) { - // `docker version --format {{.Client.Version}}` would be preferable, but - // that fails if the server isn't running yet. 
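checkDaemonUp builds a port-specific regexp and greps netstat (or ss as a fallback) output line by line; the pure matching half of that check is easy to pull out and unit test, which is what the TODO in the deleted code asks for. A self-contained sketch:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchListening reports whether any line of netstat/ss output shows a
// listener on the given port.
func matchListening(port int, netstatOut string) bool {
	re := regexp.MustCompile(fmt.Sprintf(":%d\\s+.*:.*", port))
	for _, line := range strings.Split(netstatOut, "\n") {
		if line != "" && re.MatchString(line) {
			return true
		}
	}
	return false
}

func main() {
	out := "tcp   LISTEN 0 128 *:2376   *:*\ntcp   LISTEN 0 128 127.0.0.1:22   *:*"
	fmt.Println(matchListening(2376, out)) // true
	fmt.Println(matchListening(2375, out)) // false
}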
- // - // output is expected to be something like - // - // Docker version 1.12.1, build 7a86f89 - output, err := ssh.SSHCommand("docker --version") - if err != nil { - return "", err - } - - words := strings.Fields(output) - if len(words) < 3 || words[0] != "Docker" || words[1] != "version" { - return "", fmt.Errorf("DockerClientVersion: cannot parse version string from %q", output) - } - - return strings.TrimRight(words[2], ","), nil -} - -func waitForLockAptGetUpdate(ssh SSHCommander) error { - return waitForLock(ssh, "sudo apt-get update") -} - -func waitForLock(ssh SSHCommander, cmd string) error { - var sshErr error - err := mcnutils.WaitFor(func() bool { - _, sshErr = ssh.SSHCommand(cmd) - if sshErr != nil { - if strings.Contains(sshErr.Error(), "Could not get lock") { - sshErr = nil - return false - } - return true - } - return true - }) - if sshErr != nil { - return fmt.Errorf("Error running %q: %s", cmd, sshErr) - } - if err != nil { - return fmt.Errorf("Failed to obtain lock: %s", err) - } - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/shell/shell.go b/vendor/github.com/docker/machine/libmachine/shell/shell.go deleted file mode 100644 index d0a271c9a9ac..000000000000 --- a/vendor/github.com/docker/machine/libmachine/shell/shell.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package shell - -import ( - "errors" - "fmt" - "os" - "path/filepath" -) - -var ( - ErrUnknownShell = errors.New("Error: Unknown shell") -) - -// Detect detects user's current shell. -func Detect() (string, error) { - shell := os.Getenv("SHELL") - - if shell == "" { - fmt.Printf("The default lines below are for a sh/bash shell, you can specify the shell you're using, with the --shell flag.\n\n") - return "", ErrUnknownShell - } - - return filepath.Base(shell), nil -} diff --git a/vendor/github.com/docker/machine/libmachine/shell/shell_windows.go b/vendor/github.com/docker/machine/libmachine/shell/shell_windows.go deleted file mode 100644 index 89cd2c8b0a76..000000000000 --- a/vendor/github.com/docker/machine/libmachine/shell/shell_windows.go +++ /dev/null @@ -1,84 +0,0 @@ -package shell - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "syscall" - "unsafe" -) - -// re-implementation of private function in https://github.com/golang/go/blob/master/src/syscall/syscall_windows.go#L945 -func getProcessEntry(pid int) (pe *syscall.ProcessEntry32, err error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry syscall.ProcessEntry32 - processEntry.Size = uint32(unsafe.Sizeof(processEntry)) - err = syscall.Process32First(snapshot, &processEntry) - if err != nil { - return nil, err - } - - for { - if processEntry.ProcessID == uint32(pid) { - pe = &processEntry - return - } - - err = syscall.Process32Next(snapshot, &processEntry) - if err != nil { - return nil, err - } - } -} - -// getNameAndItsPpid returns the exe file name its parent process id. 
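DockerClientVersion falls back to parsing the human-readable `docker --version` banner because the `--format` variant needs a running daemon. A standalone sketch of that parse, mirroring the field checks and the trailing-comma trim in the deleted function:

package main

import (
	"fmt"
	"strings"
)

// parseDockerVersion extracts "1.12.1" from a banner such as
// "Docker version 1.12.1, build 7a86f89".
func parseDockerVersion(banner string) (string, error) {
	words := strings.Fields(banner)
	if len(words) < 3 || words[0] != "Docker" || words[1] != "version" {
		return "", fmt.Errorf("cannot parse version string from %q", banner)
	}
	return strings.TrimRight(words[2], ","), nil
}

func main() {
	v, err := parseDockerVersion("Docker version 1.12.1, build 7a86f89")
	fmt.Println(v, err) // 1.12.1 <nil>
}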
-func getNameAndItsPpid(pid int) (exefile string, parentid int, err error) { - pe, err := getProcessEntry(pid) - if err != nil { - return "", 0, err - } - - name := syscall.UTF16ToString(pe.ExeFile[:]) - return name, int(pe.ParentProcessID), nil -} - -func Detect() (string, error) { - shell := os.Getenv("SHELL") - - if shell == "" { - shell, shellppid, err := getNameAndItsPpid(os.Getppid()) - if err != nil { - return "cmd", err // defaulting to cmd - } - if strings.Contains(strings.ToLower(shell), "powershell") { - return "powershell", nil - } else if strings.Contains(strings.ToLower(shell), "cmd") { - return "cmd", nil - } else { - shell, _, err := getNameAndItsPpid(shellppid) - if err != nil { - return "cmd", err // defaulting to cmd - } - if strings.Contains(strings.ToLower(shell), "powershell") { - return "powershell", nil - } else if strings.Contains(strings.ToLower(shell), "cmd") { - return "cmd", nil - } else { - fmt.Printf("You can further specify your shell with either 'cmd' or 'powershell' with the --shell flag.\n\n") - return "cmd", nil // this could be either powershell or cmd, defaulting to cmd - } - } - } - - if os.Getenv("__fish_bin_dir") != "" { - return "fish", nil - } - - return filepath.Base(shell), nil -} diff --git a/vendor/github.com/docker/machine/libmachine/ssh/client.go b/vendor/github.com/docker/machine/libmachine/ssh/client.go deleted file mode 100644 index 780b11a5ff22..000000000000 --- a/vendor/github.com/docker/machine/libmachine/ssh/client.go +++ /dev/null @@ -1,447 +0,0 @@ -package ssh - -import ( - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "runtime" - "strconv" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/docker/machine/libmachine/log" - "github.com/docker/machine/libmachine/mcnutils" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/terminal" -) - -type Client interface { - Output(command string) (string, error) - Shell(args ...string) error - - // Start starts the specified command without waiting for it to finish. You - // have to call the Wait function for that. - // - // The first two io.ReadCloser are the standard output and the standard - // error of the executing command respectively. The returned error follows - // the same logic as in the exec.Cmd.Start function. - Start(command string) (io.ReadCloser, io.ReadCloser, error) - - // Wait waits for the command started by the Start function to exit. The - // returned error follows the same logic as in the exec.Cmd.Wait function. - Wait() error -} - -type ExternalClient struct { - BaseArgs []string - BinaryPath string - cmd *exec.Cmd -} - -type NativeClient struct { - Config ssh.ClientConfig - Hostname string - Port int - openSession *ssh.Session - openClient *ssh.Client -} - -type Auth struct { - Passwords []string - Keys []string -} - -type ClientType string - -const ( - maxDialAttempts = 10 -) - -const ( - External ClientType = "external" - Native ClientType = "native" -) - -var ( - baseSSHArgs = []string{ - "-F", "/dev/null", - "-o", "ConnectionAttempts=3", // retry 3 times if SSH connection fails - "-o", "ConnectTimeout=10", // timeout after 10 seconds - "-o", "ControlMaster=no", // disable ssh multiplexing - "-o", "ControlPath=none", - "-o", "LogLevel=quiet", // suppress "Warning: Permanently added '[localhost]:2022' (ECDSA) to the list of known hosts." 
- "-o", "PasswordAuthentication=no", - "-o", "ServerAliveInterval=60", // prevents connection to be dropped if command takes too long - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - } - defaultClientType = External -) - -func SetDefaultClient(clientType ClientType) { - // Allow over-riding of default client type, so that even if ssh binary - // is found in PATH we can still use the Go native implementation if - // desired. - switch clientType { - case External: - defaultClientType = External - case Native: - defaultClientType = Native - } -} - -func NewClient(user string, host string, port int, auth *Auth) (Client, error) { - sshBinaryPath, err := exec.LookPath("ssh") - if err != nil { - log.Debug("SSH binary not found, using native Go implementation") - client, err := NewNativeClient(user, host, port, auth) - log.Debug(client) - return client, err - } - - if defaultClientType == Native { - log.Debug("Using SSH client type: native") - client, err := NewNativeClient(user, host, port, auth) - log.Debug(client) - return client, err - } - - log.Debug("Using SSH client type: external") - client, err := NewExternalClient(sshBinaryPath, user, host, port, auth) - log.Debug(client) - return client, err -} - -func NewNativeClient(user, host string, port int, auth *Auth) (Client, error) { - config, err := NewNativeConfig(user, auth) - if err != nil { - return nil, fmt.Errorf("Error getting config for native Go SSH: %s", err) - } - - return &NativeClient{ - Config: config, - Hostname: host, - Port: port, - }, nil -} - -func NewNativeConfig(user string, auth *Auth) (ssh.ClientConfig, error) { - var ( - authMethods []ssh.AuthMethod - ) - - for _, k := range auth.Keys { - key, err := ioutil.ReadFile(k) - if err != nil { - return ssh.ClientConfig{}, err - } - - privateKey, err := ssh.ParsePrivateKey(key) - if err != nil { - return ssh.ClientConfig{}, err - } - - authMethods = append(authMethods, ssh.PublicKeys(privateKey)) - } - - for _, p := range auth.Passwords { - authMethods = append(authMethods, ssh.Password(p)) - } - - return ssh.ClientConfig{ - User: user, - Auth: authMethods, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - }, nil -} - -func (client *NativeClient) dialSuccess() bool { - conn, err := ssh.Dial("tcp", net.JoinHostPort(client.Hostname, strconv.Itoa(client.Port)), &client.Config) - if err != nil { - log.Debugf("Error dialing TCP: %s", err) - return false - } - closeConn(conn) - return true -} - -func (client *NativeClient) session(command string) (*ssh.Client, *ssh.Session, error) { - if err := mcnutils.WaitFor(client.dialSuccess); err != nil { - return nil, nil, fmt.Errorf("Error attempting SSH client dial: %s", err) - } - - conn, err := ssh.Dial("tcp", net.JoinHostPort(client.Hostname, strconv.Itoa(client.Port)), &client.Config) - if err != nil { - return nil, nil, fmt.Errorf("Mysterious error dialing TCP for SSH (we already succeeded at least once) : %s", err) - } - session, err := conn.NewSession() - - return conn, session, err -} - -func (client *NativeClient) Output(command string) (string, error) { - conn, session, err := client.session(command) - if err != nil { - return "", nil - } - defer closeConn(conn) - defer session.Close() - - output, err := session.CombinedOutput(command) - - return string(output), err -} - -func (client *NativeClient) OutputWithPty(command string) (string, error) { - conn, session, err := client.session(command) - if err != nil { - return "", nil - } - defer closeConn(conn) - defer session.Close() - - fd := 
int(os.Stdout.Fd()) - - termWidth, termHeight, err := terminal.GetSize(fd) - if err != nil { - return "", err - } - - modes := ssh.TerminalModes{ - ssh.ECHO: 0, - ssh.TTY_OP_ISPEED: 14400, - ssh.TTY_OP_OSPEED: 14400, - } - - // request tty -- fixes error with hosts that use - // "Defaults requiretty" in /etc/sudoers - I'm looking at you RedHat - if err := session.RequestPty("xterm", termHeight, termWidth, modes); err != nil { - return "", err - } - - output, err := session.CombinedOutput(command) - - return string(output), err -} - -func (client *NativeClient) Start(command string) (io.ReadCloser, io.ReadCloser, error) { - conn, session, err := client.session(command) - if err != nil { - return nil, nil, err - } - - stdout, err := session.StdoutPipe() - if err != nil { - return nil, nil, err - } - stderr, err := session.StderrPipe() - if err != nil { - return nil, nil, err - } - if err := session.Start(command); err != nil { - return nil, nil, err - } - - client.openClient = conn - client.openSession = session - return ioutil.NopCloser(stdout), ioutil.NopCloser(stderr), nil -} - -func (client *NativeClient) Wait() error { - err := client.openSession.Wait() - if err != nil { - return err - } - - _ = client.openSession.Close() - - err = client.openClient.Close() - if err != nil { - return err - } - - client.openSession = nil - client.openClient = nil - return nil -} - -func (client *NativeClient) Shell(args ...string) error { - var ( - termWidth, termHeight int - ) - conn, err := ssh.Dial("tcp", net.JoinHostPort(client.Hostname, strconv.Itoa(client.Port)), &client.Config) - if err != nil { - return err - } - defer closeConn(conn) - - session, err := conn.NewSession() - if err != nil { - return err - } - - defer session.Close() - - session.Stdout = os.Stdout - session.Stderr = os.Stderr - session.Stdin = os.Stdin - - modes := ssh.TerminalModes{ - ssh.ECHO: 1, - } - - fd := os.Stdin.Fd() - - if term.IsTerminal(fd) { - oldState, err := term.MakeRaw(fd) - if err != nil { - return err - } - - defer term.RestoreTerminal(fd, oldState) - - winsize, err := term.GetWinsize(fd) - if err != nil { - termWidth = 80 - termHeight = 24 - } else { - termWidth = int(winsize.Width) - termHeight = int(winsize.Height) - } - } - - if err := session.RequestPty("xterm", termHeight, termWidth, modes); err != nil { - return err - } - - if len(args) == 0 { - if err := session.Shell(); err != nil { - return err - } - if err := session.Wait(); err != nil { - return err - } - } else { - if err := session.Run(strings.Join(args, " ")); err != nil { - return err - } - } - return nil -} - -func NewExternalClient(sshBinaryPath, user, host string, port int, auth *Auth) (*ExternalClient, error) { - client := &ExternalClient{ - BinaryPath: sshBinaryPath, - } - - args := append(baseSSHArgs, fmt.Sprintf("%s@%s", user, host)) - - // If no identities are explicitly provided, also look at the identities - // offered by ssh-agent - if len(auth.Keys) > 0 { - args = append(args, "-o", "IdentitiesOnly=yes") - } - - // Specify which private keys to use to authorize the SSH request. 
- for _, privateKeyPath := range auth.Keys { - if privateKeyPath != "" { - // Check each private key before use it - fi, err := os.Stat(privateKeyPath) - if err != nil { - // Abort if key not accessible - return nil, err - } - if runtime.GOOS != "windows" { - mode := fi.Mode() - log.Debugf("Using SSH private key: %s (%s)", privateKeyPath, mode) - // Private key file should have strict permissions - perm := mode.Perm() - if perm&0400 == 0 { - return nil, fmt.Errorf("'%s' is not readable", privateKeyPath) - } - if perm&0077 != 0 { - return nil, fmt.Errorf("permissions %#o for '%s' are too open", perm, privateKeyPath) - } - } - args = append(args, "-i", privateKeyPath) - } - } - - // Set which port to use for SSH. - args = append(args, "-p", fmt.Sprintf("%d", port)) - - client.BaseArgs = args - - return client, nil -} - -func getSSHCmd(binaryPath string, args ...string) *exec.Cmd { - return exec.Command(binaryPath, args...) -} - -func (client *ExternalClient) Output(command string) (string, error) { - args := append(client.BaseArgs, command) - cmd := getSSHCmd(client.BinaryPath, args...) - output, err := cmd.CombinedOutput() - return string(output), err -} - -func (client *ExternalClient) Shell(args ...string) error { - args = append(client.BaseArgs, args...) - cmd := getSSHCmd(client.BinaryPath, args...) - - log.Debug(cmd) - - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - return cmd.Run() -} - -func (client *ExternalClient) Start(command string) (io.ReadCloser, io.ReadCloser, error) { - args := append(client.BaseArgs, command) - cmd := getSSHCmd(client.BinaryPath, args...) - - log.Debug(cmd) - - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - if closeErr := stdout.Close(); closeErr != nil { - return nil, nil, fmt.Errorf("%s, %s", err, closeErr) - } - return nil, nil, err - } - if err := cmd.Start(); err != nil { - stdOutCloseErr := stdout.Close() - stdErrCloseErr := stderr.Close() - if stdOutCloseErr != nil || stdErrCloseErr != nil { - return nil, nil, fmt.Errorf("%s, %s, %s", - err, stdOutCloseErr, stdErrCloseErr) - } - return nil, nil, err - } - - client.cmd = cmd - return stdout, stderr, nil -} - -func (client *ExternalClient) Wait() error { - err := client.cmd.Wait() - client.cmd = nil - return err -} - -func closeConn(c io.Closer) { - err := c.Close() - if err != nil { - log.Debugf("Error closing SSH Client: %s", err) - } -} diff --git a/vendor/github.com/docker/machine/libmachine/ssh/keys.go b/vendor/github.com/docker/machine/libmachine/ssh/keys.go deleted file mode 100644 index 4d806419c120..000000000000 --- a/vendor/github.com/docker/machine/libmachine/ssh/keys.go +++ /dev/null @@ -1,124 +0,0 @@ -package ssh - -import ( - "crypto/md5" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "os" - "runtime" - - gossh "golang.org/x/crypto/ssh" -) - -var ( - ErrKeyGeneration = errors.New("Unable to generate key") - ErrValidation = errors.New("Unable to validate key") - ErrPublicKey = errors.New("Unable to convert public key") - ErrUnableToWriteFile = errors.New("Unable to write file") -) - -type KeyPair struct { - PrivateKey []byte - PublicKey []byte -} - -// NewKeyPair generates a new SSH keypair -// This will return a private & public key encoded as DER. 
-func NewKeyPair() (keyPair *KeyPair, err error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, ErrKeyGeneration - } - - if err := priv.Validate(); err != nil { - return nil, ErrValidation - } - - privDer := x509.MarshalPKCS1PrivateKey(priv) - - pubSSH, err := gossh.NewPublicKey(&priv.PublicKey) - if err != nil { - return nil, ErrPublicKey - } - - return &KeyPair{ - PrivateKey: privDer, - PublicKey: gossh.MarshalAuthorizedKey(pubSSH), - }, nil -} - -// WriteToFile writes keypair to files -func (kp *KeyPair) WriteToFile(privateKeyPath string, publicKeyPath string) error { - files := []struct { - File string - Type string - Value []byte - }{ - { - File: privateKeyPath, - Value: pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Headers: nil, Bytes: kp.PrivateKey}), - }, - { - File: publicKeyPath, - Value: kp.PublicKey, - }, - } - - for _, v := range files { - f, err := os.Create(v.File) - if err != nil { - return ErrUnableToWriteFile - } - - if _, err := f.Write(v.Value); err != nil { - return ErrUnableToWriteFile - } - - // windows does not support chmod - switch runtime.GOOS { - case "darwin", "freebsd", "linux", "openbsd": - if err := f.Chmod(0600); err != nil { - return err - } - } - } - - return nil -} - -// Fingerprint calculates the fingerprint of the public key -func (kp *KeyPair) Fingerprint() string { - b, _ := base64.StdEncoding.DecodeString(string(kp.PublicKey)) - h := md5.New() - - io.WriteString(h, string(b)) - - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// GenerateSSHKey generates SSH keypair based on path of the private key -// The public key would be generated to the same path with ".pub" added -func GenerateSSHKey(path string) error { - if _, err := os.Stat(path); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("Desired directory for SSH keys does not exist: %s", err) - } - - kp, err := NewKeyPair() - if err != nil { - return fmt.Errorf("Error generating key pair: %s", err) - } - - if err := kp.WriteToFile(path, fmt.Sprintf("%s.pub", path)); err != nil { - return fmt.Errorf("Error writing keys to file(s): %s", err) - } - } - - return nil -} diff --git a/vendor/github.com/docker/machine/libmachine/state/state.go b/vendor/github.com/docker/machine/libmachine/state/state.go deleted file mode 100644 index 385a173be774..000000000000 --- a/vendor/github.com/docker/machine/libmachine/state/state.go +++ /dev/null @@ -1,36 +0,0 @@ -package state - -// State represents the state of a host -type State int - -const ( - None State = iota - Running - Paused - Saved - Stopped - Stopping - Starting - Error - Timeout -) - -var states = []string{ - "", - "Running", - "Paused", - "Saved", - "Stopped", - "Stopping", - "Starting", - "Error", - "Timeout", -} - -// Given a State type, returns its string representation -func (s State) String() string { - if int(s) >= 0 && int(s) < len(states) { - return states[s] - } - return "" -} diff --git a/vendor/github.com/docker/machine/libmachine/swarm/swarm.go b/vendor/github.com/docker/machine/libmachine/swarm/swarm.go deleted file mode 100644 index ee45288390df..000000000000 --- a/vendor/github.com/docker/machine/libmachine/swarm/swarm.go +++ /dev/null @@ -1,22 +0,0 @@ -package swarm - -const ( - DiscoveryServiceEndpoint = "https://discovery-stage.hub.docker.com/v1" -) - -type Options struct { - IsSwarm bool - Address string - Discovery string - Agent bool - Master bool - Host string - Image string - Strategy string - Heartbeat int - Overcommit float64 - ArbitraryFlags []string - ArbitraryJoinFlags 
[]string - Env []string - IsExperimental bool -} diff --git a/vendor/github.com/docker/machine/libmachine/version/version.go b/vendor/github.com/docker/machine/libmachine/version/version.go deleted file mode 100644 index 3cacffdb0c0a..000000000000 --- a/vendor/github.com/docker/machine/libmachine/version/version.go +++ /dev/null @@ -1,11 +0,0 @@ -package version - -var ( - // APIVersion dictates which version of the libmachine API this is. - APIVersion = 1 - - // ConfigVersion dictates which version of the config.json format is - // used. It needs to be bumped if there is a breaking change, and - // therefore migration, introduced to the config file format. - ConfigVersion = 3 -) diff --git a/vendor/github.com/docker/machine/libmachine/versioncmp/compare.go b/vendor/github.com/docker/machine/libmachine/versioncmp/compare.go deleted file mode 100644 index 2df82cef6eb4..000000000000 --- a/vendor/github.com/docker/machine/libmachine/versioncmp/compare.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package versioncmp provides functions for comparing version strings. -// -// Version strings are dot-separated integers with an optional -// pre-release suffix. A pre-release suffix is an arbitrary string with a -// leading dash character. All functions ignore these suffixes, so "1.2" and -// "1.2-rc" are considered equivalent. -package versioncmp - -import ( - "strconv" - "strings" -) - -const ( - rcString = "-rc" - ceEdition = "-ce" -) - -// compare compares two versions of Docker to decipher which came first. -// -// compare returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - // Replace RC string with "." to make the RC number appear as simply - // another sub-version. - v1 = strings.Replace(v1, rcString, ".", -1) - v2 = strings.Replace(v2, rcString, ".", -1) - - // All releases before the community edition (differentiated by - // presence of the "ce" string in the version string) are "less than" - // any community edition release (first occurring in March 2017). - if strings.Contains(v1, ceEdition) && !strings.Contains(v2, ceEdition) { - return 1 - } - if !strings.Contains(v1, ceEdition) && strings.Contains(v2, ceEdition) { - return -1 - } - - // Without this tag, both are pre-CE versions. - if !strings.Contains(v1, ceEdition) && !strings.Contains(v2, ceEdition) { - return compareNumeric(v1, v2) - } - - return compareCE(v1, v2) -} - -// compareCE ("Community Edition") will differentiate between versions of -// Docker that use the versioning scheme -// {{release-year}}.{{release-month}}-{{ce|ee}}-{{rcnum|""}} -// -// This will be every release after 1.13.1. -func compareCE(v1, v2 string) int { - return compareNumeric( - strings.Replace(v1, ceEdition, "", -1), - strings.Replace(v2, ceEdition, "", -1), - ) -} - -// compareNumeric compares two version that use pre-17.03 Docker. -// -// Non-numeric segments in either argument are considered equal, so -// compare("1.a", "1.b") == 0, but compare("2.a", "1.b") == 1. 
-func compareNumeric(v1, v2 string) int { - if n := strings.IndexByte(v1, '-'); n != -1 { - v1 = v1[:n] - } - if n := strings.IndexByte(v2, '-'); n != -1 { - v2 = v2[:n] - } - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another. -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another. -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another. -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to -// another. -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another. -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/machine/version/version.go b/vendor/github.com/docker/machine/version/version.go deleted file mode 100644 index 5e664465923b..000000000000 --- a/vendor/github.com/docker/machine/version/version.go +++ /dev/null @@ -1,23 +0,0 @@ -package version - -import ( - "fmt" - "strings" -) - -var ( - Version = "dev" - - // GitCommit will be overwritten automatically by the build system - GitCommit = "HEAD" -) - -// FullVersion formats the version to be printed -func FullVersion() string { - return fmt.Sprintf("%s, build %s", Version, GitCommit) -} - -// RC checks if the Machine version is a release candidate or not -func RC() bool { - return strings.Contains(Version, "rc") -} diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 71c47ce89392..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,44 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. - -Adrien Bustany -Amit Krishnan -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Travis Cline -Tudor Golubenco -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index f21e54080090..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb881e6..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index d1d39a0ebdbd..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. 
-type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if e.Op&Create == Create { - buffer.WriteString("|CREATE") - } - if e.Op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if e.Op&Write == Write { - buffer.WriteString("|WRITE") - } - if e.Op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if e.Op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - - // If buffer remains empty, return no event names - if buffer.Len() == 0 { - return fmt.Sprintf("%q: ", e.Name) - } - - // Return a list of event names, with leading pipe character stripped - return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index 9700df55e9ac..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - cv *sync.Cond // sync removing on rm_watch with IN_IGNORE - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit() - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - w.cv = sync.NewCond(&w.mu) - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - watchEntry, found := w.watches[name] - w.mu.Unlock() - if found { - watchEntry.flags |= flags - flags |= unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - w.mu.Lock() - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - w.mu.Unlock() - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - // wait until ignoreLinux() deleting maps - exists := true - for exists { - w.cv.Wait() - _, exists = w.watches[name] - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. 
- // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name := w.paths[int(raw.Wd)] - w.mu.Unlock() - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(w, raw.Wd, mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - w.mu.Lock() - defer w.mu.Unlock() - name := w.paths[int(wd)] - delete(w.paths, int(wd)) - delete(w.watches, name) - w.cv.Broadcast() - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. 
-func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index cc7db4b22ef5..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. 
- return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index c2b4acb18ddf..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
- fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan bool), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - w.mu.Unlock() - - // copy paths to remove while locked - w.mu.Lock() - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - var err error - for _, name := range pathsToRemove { - if e := w.Remove(name); e != nil && err == nil { - err = e - } - } - - // Send "quit" message to the reader goroutine: - w.done <- true - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - close(w.Events) - close(w.Errors) - return - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - w.Errors <- err - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. 
This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel - w.Events <- event - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - w.Errors <- err - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - w.Events <- newCreateEvent(filePath) - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 7d8de14513ed..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 9139e17161bf..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f31d821..000000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de730..000000000000 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
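For reference, the watcher removed with this vendored copy of fsnotify is driven entirely through the exported surface shown above: NewWatcher, Add, Remove, Close, and the Events/Errors channels. A minimal usage sketch follows (assuming the package is imported from github.com/fsnotify/fsnotify and that "/tmp" stands in for a real directory to watch):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Create the watcher; on Windows this opens an I/O completion port and
	// starts the readEvents goroutine shown in the deleted windows.go.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a single directory (non-recursively).
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Drain events and errors until the watcher is closed.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}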
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 0bd3c2b46267..000000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. 
-type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. 
- // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. 
-// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index c02beacb9a41..000000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: ", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: ", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. 
- return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. 
- jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 2eaf4d53a0c1..000000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,14 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 84a85b1e88db..000000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,18 +0,0 @@ -Anton Povarov -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS deleted file mode 100644 index b368efb7f2f8..000000000000 --- a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS +++ /dev/null @@ -1,5 +0,0 @@ -The contributors to the Go protobuf repository: - -# This source code was written by the Go contributors. 
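The removed ghodss/yaml package marshals by round-tripping through JSON, so struct field names come from `json` tags rather than `yaml` tags. A short sketch of the exported Marshal, Unmarshal, and YAMLToJSON functions deleted above (a hedged illustration; the Config type is invented here):

package main

import (
	"fmt"
	"log"

	"github.com/ghodss/yaml"
)

type Config struct {
	Name     string `json:"name"` // json tags, not yaml tags, drive the key names
	Replicas int    `json:"replicas,omitempty"`
}

func main() {
	// Struct -> YAML (internally json.Marshal, then JSON -> YAML).
	out, err := yaml.Marshal(Config{Name: "minikube", Replicas: 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out) // typically: "name: minikube" and "replicas: 1"

	// YAML -> struct (internally YAML -> JSON, then json.Unmarshal).
	var c Config
	if err := yaml.Unmarshal([]byte("name: demo\nreplicas: 3\n"), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Replicas) // demo 3

	// YAML -> JSON directly; JSON input passes through essentially unchanged.
	j, err := yaml.YAMLToJSON([]byte("enabled: true\n"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(j)) // {"enabled":true}
}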
-# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index 7be0cc7b62cf..000000000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,36 +0,0 @@ -Protocol Buffers for Go with Gadgets - -Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index 5d4cba4b51c6..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,234 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. 
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 737f2731d45d..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,978 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
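Clone and Merge, deleted just above, operate on generated message types. A hedged sketch of how callers typically use them; pb.Example is a hypothetical proto2-style generated message (any concrete proto.Message would do), and its fields and getters are assumptions for illustration only:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	pb "example.com/project/example" // hypothetical generated package
)

func main() {
	// Assume pb.Example has an optional Name (*string) and a repeated Tags ([]string).
	src := &pb.Example{Name: proto.String("a"), Tags: []string{"x"}}

	// Clone returns a deep copy that shares no memory with src.
	dst := proto.Clone(src).(*pb.Example)

	// Merge overwrites scalar fields that are set in src and appends repeated fields.
	proto.Merge(dst, &pb.Example{Tags: []string{"y"}})

	fmt.Println(dst.GetName(), dst.GetTags()) // a [x y]
}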
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
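The wire-format helpers above are exported, so their behaviour is easy to check directly. A small worked example of the varint and zigzag encodings they decode (0x96 0x01 is the varint encoding of 150; zigzag maps -1 to 1):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Varint: little-endian 7-bit groups, high bit set on all but the last byte.
	// 0x96 0x01 -> 0x16 + (0x01 << 7) = 22 + 128 = 150, consuming 2 bytes.
	x, n := proto.DecodeVarint([]byte{0x96, 0x01})
	fmt.Println(x, n) // 150 2

	// The Buffer form tracks an index, so several values can be read in sequence.
	b := proto.NewBuffer([]byte{0x96, 0x01, 0x01})
	v1, _ := b.DecodeVarint()
	v2, _ := b.DecodeVarint()
	fmt.Println(v1, v2) // 150 1

	// Zigzag (sint32/sint64): 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
	// DecodeZigzag64 reads a varint and then undoes that mapping.
	z := proto.NewBuffer([]byte{0x01}) // varint 1 == zigzag(-1)
	sv, _ := z.DecodeZigzag64()
	fmt.Println(int64(sv)) // -1
}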
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
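A brief sketch of the Unmarshal/UnmarshalMerge distinction documented above, again using the hypothetical pb.Example message from the earlier Clone/Merge sketch (its Tags field is an assumption for illustration):

package main

import (
	"log"

	"github.com/gogo/protobuf/proto"

	pb "example.com/project/example" // hypothetical generated package
)

func main() {
	raw, err := proto.Marshal(&pb.Example{Tags: []string{"x"}})
	if err != nil {
		log.Fatal(err)
	}

	dst := &pb.Example{Tags: []string{"old"}}

	// Unmarshal resets dst first, so afterwards Tags == ["x"].
	if err := proto.Unmarshal(raw, dst); err != nil {
		log.Fatal(err)
	}

	// UnmarshalMerge keeps existing data: repeated fields are appended,
	// so decoding raw a second time yields Tags == ["x", "x"].
	if err := proto.UnmarshalMerge(raw, dst); err != nil {
		log.Fatal(err)
	}
}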
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
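The required-field bookkeeping in unmarshalType above folds tags 1-64 into a single uint64 bitmap so that a required field decoded twice is only counted once. A small self-contained sketch of that bookkeeping follows; the tag values and the count of three required fields are made-up examples.

package main

import "fmt"

func main() {
	const requiredCount = 3 // suppose the message declares three required fields
	remaining := requiredCount
	var seen uint64 // bitmap for tags 1..64, as in unmarshalType above

	// Tags decoded from the stream; tag 2 appears twice but is counted once.
	for _, tag := range []int{2, 7, 2, 9} {
		if tag >= 1 && tag <= 64 {
			mask := uint64(1) << uint(tag-1)
			if seen&mask == 0 {
				seen |= mask
				remaining--
			}
		}
	}
	fmt.Println("required fields still missing:", remaining) // prints 0
}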
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
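As the comments above describe, a map field is encoded as a repeated two-field entry message (key is field 1, value is field 2), and each entry is wrapped like any other embedded message. Here is a hand-worked standalone example for a hypothetical map<int32, string> field with tag 3 holding {1: "hi"}.

package main

import "fmt"

func main() {
	// Inner map-entry message for key=1, value="hi":
	//   0x08 0x01          field 1 (key), varint 1
	//   0x12 0x02 'h' 'i'  field 2 (value), length-delimited "hi"
	entry := []byte{0x08, 0x01, 0x12, 0x02, 'h', 'i'}

	// The entry is then emitted like any embedded message: the map field's
	// own key (tag 3, wire type 2 => 3<<3|2 = 0x1a) plus a varint length.
	field := append([]byte{0x1a, byte(len(entry))}, entry...)

	fmt.Printf("% x\n", field) // 1a 06 08 01 12 02 68 69
}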
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de4cc94..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). 
-func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. -func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91cffb..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. 
-func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index 18e2a5f77654..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,203 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
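durationProto above splits a time.Duration into whole seconds plus a nanosecond remainder carrying the same sign, and durationFromProto rejects values that would overflow time.Duration's roughly +/-290-year range. A minimal standalone round-trip of that split, using only the standard library and an arbitrary sample duration:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := -90*time.Second - 500*time.Millisecond

	// Split into seconds + nanos, both carrying the duration's sign,
	// as durationProto above does.
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	fmt.Println(secs, nanos) // -90 -500000000

	// Rebuild a time.Duration; for values near the int64 nanosecond limit
	// this is where the overflow checks in durationFromProto matter.
	back := time.Duration(secs)*time.Second + time.Duration(nanos)
	fmt.Println(back == d) // true
}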
- -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 2b30f84626ad..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. 
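EncodeVarint above emits base-128 varints: seven payload bits per byte, least significant group first, with the high bit set on every byte except the last. A standalone sketch with the classic worked example of 300 follows; appendVarint is an illustrative helper, not the package API.

package main

import "fmt"

// appendVarint appends the base-128 varint encoding of x to b,
// matching the format produced by Buffer.EncodeVarint above.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	// 300 = 0b1_0010_1100 -> low group 0101100, high group 0000010
	// -> bytes 0xac (continuation bit set) then 0x02.
	fmt.Printf("% x\n", appendVarint(nil, 300)) // ac 02
	fmt.Printf("% x\n", appendVarint(nil, 1))   // 01
}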
-// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) 
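EncodeZigzag64 and EncodeZigzag32 above map signed values onto unsigned ones so that small negative numbers still encode as short varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on. A standalone sketch of the 64-bit mapping and its inverse (the helper names are illustrative):

package main

import "fmt"

// zigzag64 matches the transform used by EncodeZigzag64 above.
func zigzag64(n int64) uint64 {
	return uint64(n<<1) ^ uint64(n>>63) // arithmetic shift propagates the sign
}

func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, n := range []int64{0, -1, 1, -2, 2, -64} {
		z := zigzag64(n)
		fmt.Printf("%4d -> %3d -> %d\n", n, z, unzigzag64(z))
	}
	// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, -64 -> 127
}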
- return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
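The packed encoders above write the field key once, then a varint byte count, then the concatenated values. A hand-worked standalone example using []int32{3, 270, 86942} in a hypothetical field 4; appendVarint is the same illustrative helper as in the earlier sketches.

package main

import "fmt"

func appendVarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	values := []int32{3, 270, 86942}

	// Encode the payload first so its byte length is known, as the packed
	// encoders above do with a scratch Buffer.
	var payload []byte
	for _, v := range values {
		payload = appendVarint(payload, uint64(v))
	}

	// Field key: tag 4, wire type 2 (length-delimited) => 4<<3|2 = 0x22.
	field := appendVarint([]byte{0x22}, uint64(len(payload)))
	field = append(field, payload...)

	fmt.Printf("% x\n", field) // 22 06 03 8e 02 9e a7 05
}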
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. 
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. 
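enc_len_thing, deleted above, reserves four bytes for a message's length prefix, encodes the message, then slides the payload if the real varint length needs more or fewer bytes than were reserved. A rough standalone sketch of that reserve-then-shift technique over a plain byte slice (helper names are mine, not the library's; binary.PutUvarint uses the same base-128 encoding as protobuf varints):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// sizeUvarint reports how many bytes x occupies as a base-128 varint.
func sizeUvarint(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

// lengthPrefixed appends enc's output to buf, preceded by its varint length,
// reserving four bytes up front and shifting the payload afterwards if the
// actual length varint is a different size.
func lengthPrefixed(buf []byte, enc func([]byte) []byte) []byte {
	const reserve = 4
	iLen := len(buf)
	buf = append(buf, make([]byte, reserve)...) // placeholder for the length
	iMsg := len(buf)
	buf = enc(buf)
	msgLen := len(buf) - iMsg

	switch shift := sizeUvarint(uint64(msgLen)) - reserve; {
	case shift > 0: // length needs more room: move the payload right
		buf = append(buf, make([]byte, shift)...)
		copy(buf[iMsg+shift:], buf[iMsg:iMsg+msgLen])
	case shift < 0: // reserved too much: move the payload left
		copy(buf[iMsg+shift:], buf[iMsg:iMsg+msgLen])
		buf = buf[:len(buf)+shift]
	}
	binary.PutUvarint(buf[iLen:], uint64(msgLen)) // fill the reserved space
	return buf
}

func main() {
	out := lengthPrefixed(nil, func(b []byte) []byte {
		return append(b, "hello"...)
	})
	fmt.Printf("% X\n", out) // 05 68 65 6C 6C 6F
}
```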
- reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 32111b7f41d7..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,350 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) - return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. -func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) 
- err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf596664..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 0dfcb538e8f1..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,693 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
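The equalAny byte-slice case above treats zero-length proto3 bytes fields as equal but still distinguishes nil from empty for proto2. A tiny illustration of why the explicit IsNil check is needed on top of bytes.Equal (plain Go, no proto types):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var unset []byte  // nil: an unset proto2 bytes field
	empty := []byte{} // set, but zero length

	// bytes.Equal alone cannot tell these two apart...
	fmt.Println(bytes.Equal(unset, empty)) // true

	// ...so equalAny checks IsNil explicitly for proto2 semantics; only
	// proto3 fields treat nil and zero-length as the same value.
	fmt.Println(unset == nil, empty == nil) // true false
}
```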
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. 
- // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -type extensionRange interface { - Message - ExtensionRangeArray() []ExtensionRange -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() -var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() -var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extensionRange, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
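XXX_InternalExtensions, described above, keeps its map and mutex behind a single pointer so that copying a message value never copies a locked mutex, and it allocates the map only on first write. A condensed sketch of that shape with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

// lazyExtensions mimics the XXX_InternalExtensions layout: one pointer to a
// mutex-plus-map pair, allocated lazily by the write accessor.
type lazyExtensions struct {
	p *struct {
		mu sync.Mutex
		m  map[int32][]byte
	}
}

// write returns the map, allocating it on first use.
func (e *lazyExtensions) write() map[int32][]byte {
	if e.p == nil {
		e.p = new(struct {
			mu sync.Mutex
			m  map[int32][]byte
		})
		e.p.m = make(map[int32][]byte)
	}
	return e.p.m
}

// read returns the map (possibly nil) plus the lock the caller must hold
// while touching it, mirroring extensionsRead.
func (e *lazyExtensions) read() (map[int32][]byte, sync.Locker) {
	if e.p == nil {
		return nil, nil
	}
	return e.p.m, &e.p.mu
}

func main() {
	var e lazyExtensions
	e.write()[7] = []byte{0x01}
	if m, mu := e.read(); m != nil {
		mu.Lock()
		fmt.Printf("% X\n", m[7]) // 01
		mu.Unlock()
	}
}
```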
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. 
-func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) - } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. 
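HasExtension's extensionsBytes path above scans the raw extension bytes tag by tag: decode a varint tag, split it into field number and wire type, then skip the value by wire type. A standalone sketch of that scan loop (group wire types omitted for brevity; helper names are mine):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// skip reports how many bytes the value with the given wire type occupies,
// the same bookkeeping HasExtension and size() perform while scanning.
func skip(buf []byte, wire uint64) (int, error) {
	switch wire {
	case 0: // varint
		_, n := binary.Uvarint(buf)
		if n <= 0 {
			return 0, fmt.Errorf("bad varint")
		}
		return n, nil
	case 1: // fixed64
		return 8, nil
	case 2: // length-delimited
		l, n := binary.Uvarint(buf)
		if n <= 0 {
			return 0, fmt.Errorf("bad length")
		}
		return n + int(l), nil
	case 5: // fixed32
		return 4, nil
	}
	return 0, fmt.Errorf("unsupported wire type %d", wire)
}

// fieldNumbers lists the field numbers present in a serialized message,
// mirroring the tag/skip loop used to test for an extension.
func fieldNumbers(buf []byte) ([]int32, error) {
	var fields []int32
	for o := 0; o < len(buf); {
		tag, n := binary.Uvarint(buf[o:])
		if n <= 0 {
			return nil, fmt.Errorf("bad tag at offset %d", o)
		}
		o += n
		l, err := skip(buf[o:], tag&0x7)
		if err != nil {
			return nil, err
		}
		fields = append(fields, int32(tag>>3))
		o += l
	}
	return fields, nil
}

func main() {
	// Field 1: varint 150; field 2: the string "hi".
	buf := []byte{0x08, 0x96, 0x01, 0x12, 0x02, 'h', 'i'}
	fmt.Println(fieldNumbers(buf)) // [1 2] <nil>
}
```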
- e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(pb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - *ext = append(*ext, p.buf...) - return nil - } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
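RegisterExtension, deleted above, maintains a global registry of descriptors keyed by the extended struct's reflect.Type and panics on duplicate field numbers. A pared-down sketch of that registry pattern with stand-in types (desc and MyMessage are hypothetical, not library names):

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// desc stands in for *ExtensionDesc; only the field number matters here.
type desc struct{ Field int32 }

// registry mirrors the extensionMaps shape: descriptors indexed first by the
// extended struct type, then by field number.
var registry = make(map[reflect.Type]map[int32]*desc)

// register adds a descriptor, treating a duplicate field number for the same
// extended type as a programming error.
func register(extendedType interface{}, d *desc) {
	st := reflect.TypeOf(extendedType).Elem() // extendedType is a nil *Struct
	m := registry[st]
	if m == nil {
		m = make(map[int32]*desc)
		registry[st] = m
	}
	if _, dup := m[d.Field]; dup {
		panic("duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(d.Field)))
	}
	m[d.Field] = d
}

type MyMessage struct{}

func main() {
	register((*MyMessage)(nil), &desc{Field: 100})
	fmt.Println(len(registry[reflect.TypeOf(MyMessage{})])) // 1
}
```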
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index ea6478f009d7..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,294 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" -) - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err - } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) - } - return n, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { - return nil, err - } - return m[id].enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - 
i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) - m[int32(tag)] = ext - } -} - -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil - } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - return nil -} - -func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 7580bb45c617..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,898 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := 
m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. 
- */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
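The helper routines above (proto.Bool, proto.Int32, proto.String, and friends) exist because optional scalar fields in proto2-generated structs are pointers. Below is a minimal sketch of how they pair with the generated getters, reusing the example.Test type from the package documentation earlier in this deleted file; the "./example.pb" import is the same placeholder path used there, not a real module path.

    package main

    import (
        "log"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb" // placeholder generated package, as in the doc example
    )

    func main() {
        // Setting optional fields means storing pointers; the helpers allocate them.
        msg := &pb.Test{
            Label: proto.String("hello"),
            Reps:  []int64{1, 2, 3},
            // Type is deliberately left unset (nil).
        }

        // Getters are nil-safe and fall back to the declared default,
        // so GetType returns Default_Test_Type (77) here.
        log.Println(msg.GetLabel(), msg.GetType())

        data, err := proto.Marshal(msg)
        if err != nil {
            log.Fatal("marshaling error: ", err)
        }
        out := &pb.Test{}
        if err := proto.Unmarshal(data, out); err != nil {
            log.Fatal("unmarshaling error: ", err)
        }
    }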
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
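SetDefaults, documented above, fills in unset fields that declare defaults and recurses into non-nil sub-messages. A short sketch under the same assumption as the previous example, that the generated example.Test from the package documentation is available (its "type" field declares default=77):

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb" // placeholder generated package, as in the doc example
    )

    func main() {
        msg := &pb.Test{Label: proto.String("x")} // Type deliberately left unset
        proto.SetDefaults(msg)
        // Type now points at the proto-declared default (77); fields without a
        // declared default, and nil sub-messages, are left untouched.
        fmt.Println(msg.GetType()) // 77
    }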
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index 4b4f7c909e6b..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,42 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "encoding/json" - "strconv" -) - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index fd982decd66e..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
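MarshalJSONEnum in the deleted lib_gogo.go above is the encoding-side helper that pairs with UnmarshalJSONEnum from lib.go. A sketch of the round trip using the FOO_name and FOO_value maps from the generated example in the package documentation; as before, the generated package path is the doc's placeholder.

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb" // placeholder generated package, as in the doc example
    )

    func main() {
        // Encode enum value 17 using its symbolic name ("X"); values missing
        // from the map fall back to their decimal representation.
        b, err := proto.MarshalJSONEnum(pb.FOO_name, 17)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "X"

        // Decoding accepts either JSON form, symbolic or numeric.
        v, err := proto.UnmarshalJSONEnum(pb.FOO_value, b, "example.FOO")
        if err != nil {
            panic(err)
        }
        fmt.Println(v) // 17
    }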
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. 
- b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2e16dc..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. 
-func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
-type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 1763a5f227a0..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// +build appengine js - -package proto - -import ( - "reflect" -) - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") -} - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") -} - -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") -} - -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d47cd3..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
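The two build-tagged implementations being deleted here do the same job in different ways: pointer_reflect.go (appengine and js) identifies a field by its reflect field index, while pointer_unsafe.go identifies it by its byte offset and adds that offset to the struct's address. The following standalone sketch, using hypothetical type and field names, illustrates the offset arithmetic the unsafe variant is built on.

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // msg is a stand-in for a generated message struct.
    type msg struct {
        Label *string
        Count int32
    }

    func main() {
        m := &msg{Count: 7}

        // In pointer_unsafe.go a "field" is just the byte offset reported by reflect.
        f, _ := reflect.TypeOf(msg{}).FieldByName("Count")
        offset := f.Offset

        // The accessors then add that offset to the struct pointer, mirroring
        // (*int32)(unsafe.Pointer(uintptr(p) + uintptr(f))) in the deleted file.
        countPtr := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + offset))
        fmt.Println(*countPtr) // 7
    }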
- -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. 
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index f156a29f0e83..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,128 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() -} - -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). -type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 44b332052ef5..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,968 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. 
Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - StdTime bool - StdDuration bool - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } - case reflect.Struct: - p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = 
size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.mvalprop.CustomType = p.CustomType - p.mvalprop.StdDuration = p.StdDuration - p.mvalprop.StdTime = p.StdTime - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. 
-func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. 
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index b6b7176c5656..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,111 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "os" - "reflect" -) - -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7c19..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index d63732fcbda5..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,928 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if pv.Type().Implements(extensionRangeType) { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - props.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), props) - props.StdTime = true - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - props.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), props) - props.StdDuration = true - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. 
-func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. 
- - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). 
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e41b..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index 9db12e96018e..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1013 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; 
skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. 
- p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False.
- switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542bcf..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. 
-func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index d42764743606..000000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,229 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} - -func (o *Buffer) decTimestamp() (time.Time, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return time.Time{}, err - } - tproto := &timestamp{} - if err := Unmarshal(b, tproto); err != nil { - return time.Time{}, err - } - return timestampFromProto(tproto) -} - -func (o *Buffer) dec_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setPtrCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) - var zero field - setPtrCustomType(newBas, zero, &t) - return nil -} - -func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) - var zero field - setCustomType(newBas, zero, &t) - return nil -} - -func size_time(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_time(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go deleted file mode 100644 index ceadde6a5e10..000000000000 --- a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go +++ /dev/null @@ -1,101 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package sortkeys - -import ( - "sort" -) - -func Strings(l []string) { - sort.Strings(l) -} - -func Float64s(l []float64) { - sort.Float64s(l) -} - -func Float32s(l []float32) { - sort.Sort(Float32Slice(l)) -} - -func Int64s(l []int64) { - sort.Sort(Int64Slice(l)) -} - -func Int32s(l []int32) { - sort.Sort(Int32Slice(l)) -} - -func Uint64s(l []uint64) { - sort.Sort(Uint64Slice(l)) -} - -func Uint32s(l []uint32) { - sort.Sort(Uint32Slice(l)) -} - -func Bools(l []bool) { - sort.Sort(BoolSlice(l)) -} - -type BoolSlice []bool - -func (p BoolSlice) Len() int { return len(p) } -func (p BoolSlice) Less(i, j int) bool { return p[j] } -func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int32Slice []int32 - -func (p Int32Slice) Len() int { return len(p) } -func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint32Slice []uint32 - -func (p Uint32Slice) Len() int { return len(p) } -func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Float32Slice []float32 - -func (p Float32Slice) Len() int { return len(p) } -func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14fdc..000000000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. 
- -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. 
Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 3e63fffd5ecb..000000000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1177 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. 
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. 
-func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. 
- logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. 
- t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. 
-// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' 
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. 
- l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. 
-func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. 
- if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. 
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d281110..000000000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
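For context on what this hunk drops: the vendored glog package deleted above exposes leveled logging (Info/Warning/Error/Fatal), V-style verbosity gated by -v and -vmodule, and buffered output that must be flushed. A minimal usage sketch, assuming only the flags and functions visible in the deleted source (the program itself is hypothetical and not part of this diff):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers its flags (-logtostderr, -v, -vmodule, ...) in init,
	// so flag.Parse must run before any logging is done.
	flag.Parse()
	// Log output is buffered; flush it before the program exits.
	defer glog.Flush()

	glog.Info("starting up")
	glog.Warningf("only %d workers available", 2)

	// Guarded V-style logging avoids evaluating arguments when verbose
	// logging is disabled (controlled by -v and -vmodule).
	if glog.V(2) {
		glog.Info("verbose startup details")
	}
	glog.V(2).Infof("processed %d items", 42)
}
```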
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a14fdc..000000000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index cdfe2991fd40..000000000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specificies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. 
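The lru.Cache removed in the hunk above is a plain, non-concurrent LRU cache keyed by any comparable value. A short usage sketch, assuming only the exported API shown in the deleted file (the keys, values, and capacity here are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// A cache holding at most two entries; adding a third evicts the
	// least recently used one. Not safe for concurrent access.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // "a" is evicted here

	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	fmt.Println("len =", c.Len()) // len = 2
}
```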
-# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921efa6..000000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575b353a..000000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. 
- // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index aa207298f997..000000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,970 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
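For context on the `clone.go` being removed: `Clone` returns a deep copy, while `Merge` overwrites scalar fields that are set in the source and appends repeated fields. A hedged sketch of typical usage; `pb.Example`, its fields, and the variable names are hypothetical stand-ins for a generated message type, not anything in this repository:

```go
// Illustrative only: pb.Example is a hypothetical generated message type.
src := &pb.Example{Name: "a", Tags: []string{"x"}}

// Deep copy: dst shares no memory with src.
dst := proto.Clone(src).(*pb.Example)

// Merge appends repeated fields and sets scalar fields that are set in extra.
extra := &pb.Example{Tags: []string{"y"}}
proto.Merge(dst, extra) // dst.Tags is now ["x", "y"]

// Per the deleted code, Merge panics if dst is nil or the types differ,
// and merging a nil src into a non-nil dst is a quiet no-op.
```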
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
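`DecodeVarint` above parses the base-128 varint wire format: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. A self-contained illustration of the same arithmetic, independent of the deleted package:

```go
package main

import "fmt"

// decodeVarint mirrors the format parsed by Buffer.DecodeVarint: accumulate
// 7 bits per byte until a byte with the high (continuation) bit clear.
func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if b&0x80 == 0 {
			return x, n
		}
	}
	return 0, 0 // overflow: more than 64 bits
}

func main() {
	// 300 = 0b1_0010_1100: low 7 bits are 0101100 (0xAC once the continuation
	// bit is set), the remaining bits are 10 (0x02), so the wire bytes are
	// 0xAC 0x02.
	x, n := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}
```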
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 8b84d1b22d4c..000000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. 
If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. 
- return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. 
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
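`enc_string` and `enc_struct_message` above both emit the precomputed tag bytes (`p.tagcode`) followed by a length-delimited payload. A self-contained illustration of that wire layout for a string field with field number 1 (length-delimited wire type 2); the length here fits in a single varint byte because the payload is under 128 bytes:

```go
package main

import "fmt"

func main() {
	const fieldNum = 1
	const wireLenDelimited = 2

	payload := []byte("hi")
	var buf []byte
	buf = append(buf, byte(fieldNum<<3|wireLenDelimited)) // tag byte: 0x0a
	buf = append(buf, byte(len(payload)))                 // length varint: 0x02
	buf = append(buf, payload...)                         // raw bytes: 'h' 'i'

	fmt.Printf("% x\n", buf) // 0a 02 68 69
}
```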
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
- for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. 
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf596664..000000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index eaad21831263..000000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,587 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 1c225504a013..000000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,897 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. 
Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m 
*Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982decd66e..000000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2e16dc..000000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d47cd3..000000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index ec2289c0058e..000000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876bf033b..000000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 5e14513f28c9..000000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,895 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func 
isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index b2af97f4a982..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,139 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. 
-func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { - return false - } - - return aname == proto.MessageName(pb) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index f34601723de2..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,178 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. 
- // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595da7ab1..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 65cb0f8eb5f3..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index b2410a098ebd..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -/* -Package duration is a generated protocol buffer package. 
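// --- Illustrative sketch (not part of the vendored file above): round-tripping
// between time.Duration and durpb.Duration with the Duration/DurationProto
// helpers defined above; import paths are assumed to resolve to the vendored
// copies in this diff.
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	pb := ptypes.DurationProto(1500 * time.Millisecond) // Seconds: 1, Nanos: 500000000

	d, err := ptypes.Duration(pb) // validates range and sign rules, then converts back
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1.5s

	// Invalid protos (here: mixed signs) are rejected rather than silently accepted.
	_, err = ptypes.Duration(&durpb.Duration{Seconds: 1, Nanos: -1})
	fmt.Println(err != nil) // true
}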
- -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 47f10dbc2ccc..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,134 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. 
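// --- Illustrative sketch (not part of the vendored file above): converting
// between time.Time and the generated Timestamp message with the helpers in
// this file; import paths are assumed to resolve to the vendored copies in
// this diff.
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	now := time.Now()

	pb, err := ptypes.TimestampProto(now) // errors only outside [0001-01-01, 10000-01-01)
	if err != nil {
		panic(err)
	}

	back, err := ptypes.Timestamp(pb) // always returned in UTC
	if err != nil {
		panic(err)
	}

	fmt.Println(back.Equal(now))            // true: nanosecond precision survives the round trip
	fmt.Println(ptypes.TimestampString(pb)) // RFC 3339 form of the same instant
}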
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index e23e4a25dafc..000000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
-// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f9bb4e..000000000000 --- a/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. 
-// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. 
When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. 
-// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. 
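// --- Illustrative sketch (not part of the vendored file above): the internal
// node helpers above back the small public API of this package. Int is a
// throwaway item type defined only for this example; the import path is
// assumed to resolve to the vendored copy in this diff.
package main

import (
	"fmt"

	"github.com/google/btree"
)

// Int orders integers ascending, satisfying btree.Item.
type Int int

func (a Int) Less(b btree.Item) bool { return a < b.(Int) }

func main() {
	t := btree.New(2) // degree 2: each node holds 1-3 items and 2-4 children

	for _, v := range []Int{5, 1, 4, 2, 3} {
		t.ReplaceOrInsert(v) // returns the replaced item, or nil if v was new
	}

	fmt.Println(t.Len())          // 5
	fmt.Println(t.Min(), t.Max()) // 1 5
	fmt.Println(t.Get(Int(3)))    // 3
	fmt.Println(t.Has(Int(9)))    // false
}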
-type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
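// --- Illustrative sketch (not part of the vendored file above): the public
// Ascend*/Descend* wrappers around node.iterate, showing the inclusive and
// exclusive range semantics documented above. Int is a throwaway item type
// for this example only.
package main

import (
	"fmt"

	"github.com/google/btree"
)

type Int int

func (a Int) Less(b btree.Item) bool { return a < b.(Int) }

func main() {
	t := btree.New(2)
	for i := 1; i <= 5; i++ {
		t.ReplaceOrInsert(Int(i))
	}

	// [2, 4): the lower bound is included, the upper bound is not.
	t.AscendRange(Int(2), Int(4), func(i btree.Item) bool {
		fmt.Print(i, " ") // 2 3
		return true       // returning false stops the iteration early
	})

	// (3, last], visited in descending order.
	t.DescendGreaterThan(Int(3), func(i btree.Item) bool {
		fmt.Print(i, " ") // 5 4
		return true
	})
}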
-func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). 
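// --- Illustrative sketch (not part of the vendored file above): Clone's lazy
// copy-on-write behaviour described above; after Clone, writes to either tree
// leave the other untouched. Int is a throwaway item type for this example only.
package main

import (
	"fmt"

	"github.com/google/btree"
)

type Int int

func (a Int) Less(b btree.Item) bool { return a < b.(Int) }

func main() {
	t1 := btree.New(2)
	for i := 1; i <= 3; i++ {
		t1.ReplaceOrInsert(Int(i))
	}

	t2 := t1.Clone()           // cheap: nodes stay shared until one side writes
	t2.ReplaceOrInsert(Int(4)) // copies only the nodes on the touched path

	fmt.Println(t1.Len(), t2.Len()) // 3 4
	fmt.Println(t1.Has(Int(4)))     // false: t1 is unaffected
}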
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
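A small sketch of the ReplaceOrInsert/Delete return-value contract described above, again using only the public btree API; the values are arbitrary:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)

	// First insert: no equal item is present yet, so nil comes back.
	fmt.Println(t.ReplaceOrInsert(btree.Int(7)) == nil) // true

	// Inserting an equal item again replaces it and returns the old one.
	old := t.ReplaceOrInsert(btree.Int(7))
	fmt.Println(old) // 7

	// Delete returns the removed item, or nil if no equal item exists.
	fmt.Println(t.Delete(btree.Int(7)))        // 7
	fmt.Println(t.Delete(btree.Int(7)) == nil) // true
}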
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
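To make the half-open and closed range conventions above concrete, a short sketch that walks a couple of ranges with iterator callbacks and then clears the tree; the values are arbitrary:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)
	for i := 1; i <= 5; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}

	// AscendRange visits [greaterOrEqual, lessThan): here 2 and 3.
	t.AscendRange(btree.Int(2), btree.Int(4), func(i btree.Item) bool {
		fmt.Print(i, " ")
		return true // returning false would stop the iteration early
	})
	fmt.Println()

	// DescendLessOrEqual visits [pivot, first] in reverse: here 3, 2, 1.
	t.DescendLessOrEqual(btree.Int(3), func(i btree.Item) bool {
		fmt.Print(i, " ")
		return true
	})
	fmt.Println()

	// Clear(true) drops everything and recycles owned nodes into the freelist.
	t.Clear(true)
	fmt.Println(t.Len()) // 0
}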
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go deleted file mode 100644 index cb95b7fa1bc6..000000000000 --- a/vendor/github.com/google/btree/btree_mem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// This binary compares memory usage between btree and gollrb. -package main - -import ( - "flag" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/google/btree" - "github.com/petar/GoLLRB/llrb" -) - -var ( - size = flag.Int("size", 1000000, "size of the tree to build") - degree = flag.Int("degree", 8, "degree of btree") - gollrb = flag.Bool("llrb", false, "use llrb instead of btree") -) - -func main() { - flag.Parse() - vals := rand.Perm(*size) - var t, v interface{} - v = vals - var stats runtime.MemStats - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- BEFORE ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - start := time.Now() - if *gollrb { - tr := llrb.New() - for _, v := range vals { - tr.ReplaceOrInsert(llrb.Int(v)) - } - t = tr // keep it around - } else { - tr := btree.New(*degree) - for _, v := range vals { - tr.ReplaceOrInsert(btree.Int(v)) - } - t = tr // keep it around - } - fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) - fmt.Println("-------- AFTER ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- AFTER GC ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - if t == v { - fmt.Println("to make sure vals and tree aren't GC'd") - } -} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE deleted file mode 100644 index 32017f8fa1d8..000000000000 --- a/vendor/github.com/google/go-cmp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go deleted file mode 100644 index 41bbddc61b22..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package cmpopts provides common options for the cmp package. -package cmpopts - -import ( - "math" - "reflect" - - "github.com/google/go-cmp/cmp" -) - -func equateAlways(_, _ interface{}) bool { return true } - -// EquateEmpty returns a Comparer option that determines all maps and slices -// with a length of zero to be equal, regardless of whether they are nil. -// -// EquateEmpty can be used in conjunction with SortSlices and SortMaps. -func EquateEmpty() cmp.Option { - return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) -} - -func isEmpty(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && - (vx.Len() == 0 && vy.Len() == 0) -} - -// EquateApprox returns a Comparer option that determines float32 or float64 -// values to be equal if they are within a relative fraction or absolute margin. -// This option is not used when either x or y is NaN or infinite. -// -// The fraction determines that the difference of two values must be within the -// smaller fraction of the two values, while the margin determines that the two -// values must be within some absolute margin. -// To express only a fraction or only a margin, use 0 for the other parameter. -// The fraction and margin must be non-negative. -// -// The mathematical expression used is equivalent to: -// |x-y| ≤ max(fraction*min(|x|, |y|), margin) -// -// EquateApprox can be used in conjunction with EquateNaNs. 
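A brief sketch of how the equate options documented above behave in practice, using the cmp and cmpopts APIs shown in this file; the tolerance and sample values are arbitrary:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Nil and empty slices differ by default; EquateEmpty treats them as equal.
	fmt.Println(cmp.Equal([]int(nil), []int{}))                        // false
	fmt.Println(cmp.Equal([]int(nil), []int{}, cmpopts.EquateEmpty())) // true

	// EquateApprox: |x-y| <= max(fraction*min(|x|, |y|), margin).
	fmt.Println(cmp.Equal(1.0, 1.005, cmpopts.EquateApprox(0.01, 0))) // true
	fmt.Println(cmp.Equal(1.0, 1.5, cmpopts.EquateApprox(0.01, 0)))   // false

	// NaN != NaN under ==, but EquateNaNs considers two NaNs equal.
	fmt.Println(cmp.Equal(math.NaN(), math.NaN(), cmpopts.EquateNaNs())) // true
}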
-func EquateApprox(fraction, margin float64) cmp.Option { - if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { - panic("margin or fraction must be a non-negative number") - } - a := approximator{fraction, margin} - return cmp.Options{ - cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), - cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), - } -} - -type approximator struct{ frac, marg float64 } - -func areRealF64s(x, y float64) bool { - return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) -} -func areRealF32s(x, y float32) bool { - return areRealF64s(float64(x), float64(y)) -} -func (a approximator) compareF64(x, y float64) bool { - relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) - return math.Abs(x-y) <= math.Max(a.marg, relMarg) -} -func (a approximator) compareF32(x, y float32) bool { - return a.compareF64(float64(x), float64(y)) -} - -// EquateNaNs returns a Comparer option that determines float32 and float64 -// NaN values to be equal. -// -// EquateNaNs can be used in conjunction with EquateApprox. -func EquateNaNs() cmp.Option { - return cmp.Options{ - cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), - cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), - } -} - -func areNaNsF64s(x, y float64) bool { - return math.IsNaN(x) && math.IsNaN(y) -} -func areNaNsF32s(x, y float32) bool { - return areNaNsF64s(float64(x), float64(y)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go deleted file mode 100644 index e86554b92b45..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp" -) - -// IgnoreFields returns an Option that ignores exported fields of the -// given names on a single struct type. -// The struct type is specified by passing in a value of that type. -// -// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a -// specific sub-field that is embedded or nested within the parent struct. -// -// This does not handle unexported fields; use IgnoreUnexported instead. -func IgnoreFields(typ interface{}, names ...string) cmp.Option { - sf := newStructFilter(typ, names...) - return cmp.FilterPath(sf.filter, cmp.Ignore()) -} - -// IgnoreTypes returns an Option that ignores all values assignable to -// certain types, which are specified by passing in a value of each type. -func IgnoreTypes(typs ...interface{}) cmp.Option { - tf := newTypeFilter(typs...) - return cmp.FilterPath(tf.filter, cmp.Ignore()) -} - -type typeFilter []reflect.Type - -func newTypeFilter(typs ...interface{}) (tf typeFilter) { - for _, typ := range typs { - t := reflect.TypeOf(typ) - if t == nil { - // This occurs if someone tries to pass in sync.Locker(nil) - panic("cannot determine type; consider using IgnoreInterfaces") - } - tf = append(tf, t) - } - return tf -} -func (tf typeFilter) filter(p cmp.Path) bool { - if len(p) < 1 { - return false - } - t := p.Last().Type() - for _, ti := range tf { - if t.AssignableTo(ti) { - return true - } - } - return false -} - -// IgnoreInterfaces returns an Option that ignores all values or references of -// values assignable to certain interface types. 
These interfaces are specified -// by passing in an anonymous struct with the interface types embedded in it. -// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. -func IgnoreInterfaces(ifaces interface{}) cmp.Option { - tf := newIfaceFilter(ifaces) - return cmp.FilterPath(tf.filter, cmp.Ignore()) -} - -type ifaceFilter []reflect.Type - -func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) { - t := reflect.TypeOf(ifaces) - if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct { - panic("input must be an anonymous struct") - } - for i := 0; i < t.NumField(); i++ { - fi := t.Field(i) - switch { - case !fi.Anonymous: - panic("struct cannot have named fields") - case fi.Type.Kind() != reflect.Interface: - panic("embedded field must be an interface type") - case fi.Type.NumMethod() == 0: - // This matches everything; why would you ever want this? - panic("cannot ignore empty interface") - default: - tf = append(tf, fi.Type) - } - } - return tf -} -func (tf ifaceFilter) filter(p cmp.Path) bool { - if len(p) < 1 { - return false - } - t := p.Last().Type() - for _, ti := range tf { - if t.AssignableTo(ti) { - return true - } - if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) { - return true - } - } - return false -} - -// IgnoreUnexported returns an Option that only ignores the immediate unexported -// fields of a struct, including anonymous fields of unexported types. -// In particular, unexported fields within the struct's exported fields -// of struct types, including anonymous fields, will not be ignored unless the -// type of the field itself is also passed to IgnoreUnexported. -func IgnoreUnexported(typs ...interface{}) cmp.Option { - ux := newUnexportedFilter(typs...) - return cmp.FilterPath(ux.filter, cmp.Ignore()) -} - -type unexportedFilter struct{ m map[reflect.Type]bool } - -func newUnexportedFilter(typs ...interface{}) unexportedFilter { - ux := unexportedFilter{m: make(map[reflect.Type]bool)} - for _, typ := range typs { - t := reflect.TypeOf(typ) - if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - ux.m[t] = true - } - return ux -} -func (xf unexportedFilter) filter(p cmp.Path) bool { - sf, ok := p.Index(-1).(cmp.StructField) - if !ok { - return false - } - return xf.m[p.Index(-2).Type()] && !isExported(sf.Name()) -} - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go deleted file mode 100644 index da17d7469384..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/internal/function" -) - -// SortSlices returns a Transformer option that sorts all []V. -// The less function must be of the form "func(T, T) bool" which is used to -// sort any slice with element type V that is assignable to T. -// -// The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// -// The less function does not have to be "total". 
That is, if !less(x, y) and -// !less(y, x) for two elements x and y, their relative order is maintained. -// -// SortSlices can be used in conjunction with EquateEmpty. -func SortSlices(less interface{}) cmp.Option { - vf := reflect.ValueOf(less) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", less)) - } - ss := sliceSorter{vf.Type().In(0), vf} - return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort)) -} - -type sliceSorter struct { - in reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (ss sliceSorter) filter(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - if !(x != nil && y != nil && vx.Type() == vy.Type()) || - !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) || - (vx.Len() <= 1 && vy.Len() <= 1) { - return false - } - // Check whether the slices are already sorted to avoid an infinite - // recursion cycle applying the same transform to itself. - ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) }) - ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) }) - return !ok1 || !ok2 -} -func (ss sliceSorter) sort(x interface{}) interface{} { - src := reflect.ValueOf(x) - dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len()) - for i := 0; i < src.Len(); i++ { - dst.Index(i).Set(src.Index(i)) - } - sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) }) - ss.checkSort(dst) - return dst.Interface() -} -func (ss sliceSorter) checkSort(v reflect.Value) { - start := -1 // Start of a sequence of equal elements. - for i := 1; i < v.Len(); i++ { - if ss.less(v, i-1, i) { - // Check that first and last elements in v[start:i] are equal. - if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { - panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) - } - start = -1 - } else if start == -1 { - start = i - } - } -} -func (ss sliceSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i), v.Index(j) - return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} - -// SortMaps returns a Transformer option that flattens map[K]V types to be a -// sorted []struct{K, V}. The less function must be of the form -// "func(T, T) bool" which is used to sort any map with key K that is -// assignable to T. -// -// Flattening the map into a slice has the property that cmp.Equal is able to -// use Comparers on K or the K.Equal method if it exists. -// -// The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// • Total: if x != y, then either less(x, y) or less(y, x) -// -// SortMaps can be used in conjunction with EquateEmpty. 
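A sketch of the sort-based transformers described above; the less functions below satisfy the ordering requirements for the keys used here, and the sample slices and maps are arbitrary:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// SortSlices: treat slices as equal regardless of element order.
	intLess := func(a, b int) bool { return a < b }
	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, cmpopts.SortSlices(intLess))) // true

	// SortMaps flattens map[K]V into a sorted []struct{K, V}, which lets a
	// Comparer on the key type take part in the comparison; plain == on map
	// keys would treat "A" and "a" as different keys.
	ciLess := func(a, b string) bool { return strings.ToLower(a) < strings.ToLower(b) }
	x := map[string]int{"A": 1}
	y := map[string]int{"a": 1}
	fmt.Println(cmp.Equal(x, y,
		cmpopts.SortMaps(ciLess),
		cmp.Comparer(strings.EqualFold))) // true
}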
-func SortMaps(less interface{}) cmp.Option { - vf := reflect.ValueOf(less) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", less)) - } - ms := mapSorter{vf.Type().In(0), vf} - return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort)) -} - -type mapSorter struct { - in reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (ms mapSorter) filter(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && - (vx.Len() != 0 || vy.Len() != 0) -} -func (ms mapSorter) sort(x interface{}) interface{} { - src := reflect.ValueOf(x) - outType := mapEntryType(src.Type()) - dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) - for i, k := range src.MapKeys() { - v := reflect.New(outType).Elem() - v.Field(0).Set(k) - v.Field(1).Set(src.MapIndex(k)) - dst.Index(i).Set(v) - } - sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) - ms.checkSort(dst) - return dst.Interface() -} -func (ms mapSorter) checkSort(v reflect.Value) { - for i := 1; i < v.Len(); i++ { - if !ms.less(v, i-1, i) { - panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) - } - } -} -func (ms mapSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - if !hasReflectStructOf { - vx, vy = vx.Elem(), vy.Elem() - } - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go deleted file mode 100644 index 839b88ca4020..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !go1.8 - -package cmpopts - -import ( - "reflect" - "sort" -) - -const hasReflectStructOf = false - -func mapEntryType(reflect.Type) reflect.Type { - return reflect.TypeOf(struct{ K, V interface{} }{}) -} - -func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { - return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less}) -} -func sortSlice(slice interface{}, less func(i, j int) bool) { - sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less}) -} -func sortSliceStable(slice interface{}, less func(i, j int) bool) { - sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less}) -} - -type reflectSliceSorter struct { - slice reflect.Value - less func(i, j int) bool -} - -func (ss reflectSliceSorter) Len() int { - return ss.slice.Len() -} -func (ss reflectSliceSorter) Less(i, j int) bool { - return ss.less(i, j) -} -func (ss reflectSliceSorter) Swap(i, j int) { - vi := ss.slice.Index(i).Interface() - vj := ss.slice.Index(j).Interface() - ss.slice.Index(i).Set(reflect.ValueOf(vj)) - ss.slice.Index(j).Set(reflect.ValueOf(vi)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go deleted file mode 100644 index 8a59c0d38fdd..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build go1.8 - -package cmpopts - -import ( - "reflect" - "sort" -) - -const hasReflectStructOf = true - -func mapEntryType(t reflect.Type) reflect.Type { - return reflect.StructOf([]reflect.StructField{ - {Name: "K", Type: t.Key()}, - {Name: "V", Type: t.Elem()}, - }) -} - -func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { - return sort.SliceIsSorted(slice, less) -} -func sortSlice(slice interface{}, less func(i, j int) bool) { - sort.Slice(slice, less) -} -func sortSliceStable(slice interface{}, less func(i, j int) bool) { - sort.SliceStable(slice, less) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go deleted file mode 100644 index 97f707983c04..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp" -) - -// filterField returns a new Option where opt is only evaluated on paths that -// include a specific exported field on a single struct type. -// The struct type is specified by passing in a value of that type. -// -// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a -// specific sub-field that is embedded or nested within the parent struct. -func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { - // TODO: This is currently unexported over concerns of how helper filters - // can be composed together easily. - // TODO: Add tests for FilterField. - - sf := newStructFilter(typ, name) - return cmp.FilterPath(sf.filter, opt) -} - -type structFilter struct { - t reflect.Type // The root struct type to match on - ft fieldTree // Tree of fields to match on -} - -func newStructFilter(typ interface{}, names ...string) structFilter { - // TODO: Perhaps allow * as a special identifier to allow ignoring any - // number of path steps until the next field match? - // This could be useful when a concrete struct gets transformed into - // an anonymous struct where it is not possible to specify that by type, - // but the transformer happens to provide guarantees about the names of - // the transformed fields. - - t := reflect.TypeOf(typ) - if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T must be a struct", typ)) - } - var ft fieldTree - for _, name := range names { - cname, err := canonicalName(t, name) - if err != nil { - panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) - } - ft.insert(cname) - } - return structFilter{t, ft} -} - -func (sf structFilter) filter(p cmp.Path) bool { - for i, ps := range p { - if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { - return true - } - } - return false -} - -// fieldTree represents a set of dot-separated identifiers. 
-// -// For example, inserting the following selectors: -// Foo -// Foo.Bar.Baz -// Foo.Buzz -// Nuka.Cola.Quantum -// -// Results in a tree of the form: -// {sub: { -// "Foo": {ok: true, sub: { -// "Bar": {sub: { -// "Baz": {ok: true}, -// }}, -// "Buzz": {ok: true}, -// }}, -// "Nuka": {sub: { -// "Cola": {sub: { -// "Quantum": {ok: true}, -// }}, -// }}, -// }} -type fieldTree struct { - ok bool // Whether this is a specified node - sub map[string]fieldTree // The sub-tree of fields under this node -} - -// insert inserts a sequence of field accesses into the tree. -func (ft *fieldTree) insert(cname []string) { - if ft.sub == nil { - ft.sub = make(map[string]fieldTree) - } - if len(cname) == 0 { - ft.ok = true - return - } - sub := ft.sub[cname[0]] - sub.insert(cname[1:]) - ft.sub[cname[0]] = sub -} - -// matchPrefix reports whether any selector in the fieldTree matches -// the start of path p. -func (ft fieldTree) matchPrefix(p cmp.Path) bool { - for _, ps := range p { - switch ps := ps.(type) { - case cmp.StructField: - ft = ft.sub[ps.Name()] - if ft.ok { - return true - } - if len(ft.sub) == 0 { - return false - } - case cmp.Indirect: - default: - return false - } - } - return false -} - -// canonicalName returns a list of identifiers where any struct field access -// through an embedded field is expanded to include the names of the embedded -// types themselves. -// -// For example, suppose field "Foo" is not directly in the parent struct, -// but actually from an embedded struct of type "Bar". Then, the canonical name -// of "Foo" is actually "Bar.Foo". -// -// Suppose field "Foo" is not directly in the parent struct, but actually -// a field in two different embedded structs of types "Bar" and "Baz". -// Then the selector "Foo" causes a panic since it is ambiguous which one it -// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". -func canonicalName(t reflect.Type, sel string) ([]string, error) { - var name string - sel = strings.TrimPrefix(sel, ".") - if sel == "" { - return nil, fmt.Errorf("name must not be empty") - } - if i := strings.IndexByte(sel, '.'); i < 0 { - name, sel = sel, "" - } else { - name, sel = sel[:i], sel[i:] - } - - // Type must be a struct or pointer to struct. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("%v must be a struct", t) - } - - // Find the canonical name for this current field name. - // If the field exists in an embedded struct, then it will be expanded. - if !isExported(name) { - // Disallow unexported fields: - // * To discourage people from actually touching unexported fields - // * FieldByName is buggy (https://golang.org/issue/4876) - return []string{name}, fmt.Errorf("name must be exported") - } - sf, ok := t.FieldByName(name) - if !ok { - return []string{name}, fmt.Errorf("does not exist") - } - var ss []string - for i := range sf.Index { - ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) - } - if sel == "" { - return ss, nil - } - ssPost, err := canonicalName(sf.Type, sel) - return append(ss, ssPost...), err -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go deleted file mode 100644 index 7e215f220296..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
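A sketch of how these field selectors resolve in practice, with hypothetical Doc and Meta types invented purely for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Hypothetical types, for illustration only.
type Meta struct {
	CreatedAt time.Time
}

type Doc struct {
	Meta // embedded: its fields are addressed as "Meta.CreatedAt"
	Body string
}

func main() {
	x := Doc{Meta: Meta{CreatedAt: time.Unix(0, 0)}, Body: "hello"}
	y := Doc{Meta: Meta{CreatedAt: time.Now()}, Body: "hello"}

	// Without options the timestamps differ.
	fmt.Println(cmp.Equal(x, y)) // false

	// The dot-delimited selector ignores just that sub-field. Because Meta is
	// embedded, the selector "CreatedAt" alone would be expanded to the same
	// canonical "Meta.CreatedAt" path.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Doc{}, "Meta.CreatedAt"))) // true

	// IgnoreTypes drops every value assignable to time.Time anywhere in the values.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreTypes(time.Time{}))) // true
}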
- -// Package cmp determines equality of values. -// -// This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. -// -// The primary features of cmp are: -// -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. -// -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. -// -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. -package cmp - -import ( - "fmt" - "reflect" - - "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/function" - "github.com/google/go-cmp/cmp/internal/value" -) - -// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to -// the reflection package's inability to retrieve such entries. Equal will panic -// anytime it comes across a NaN key, but this behavior may change. -// -// See https://golang.org/issue/11104 for more details. - -var nothing = reflect.Value{} - -// Equal reports whether x and y are equal by recursively applying the -// following rules in the given order to x and y and all of their sub-values: -// -// • If two values are not of the same type, then they are never equal -// and the overall result is false. -// -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. -// -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. -// Otherwise, no such method exists and evaluation proceeds to the next rule. -// -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. -// Pointers are equal if the underlying values they point to are also equal. -// Interfaces are equal if their underlying concrete values are also equal. -// -// Structs are equal if all of their fields are equal. 
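A small sketch of two of the rules listed above, the Equal-method shortcut and the kind-by-kind fallback; net.IP is used only because it happens to provide such an Equal method, and the point type is invented for illustration:

package main

import (
	"fmt"
	"net"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// net.IP has an Equal(net.IP) bool method, so the 16-byte and 4-byte
	// encodings of the same address compare equal even though their raw
	// bytes differ.
	a := net.ParseIP("127.0.0.1") // 16-byte form
	b := net.IP{127, 0, 0, 1}     // 4-byte form
	fmt.Println(cmp.Equal(a, b)) // true

	// With no Equal method and no options, values fall through to the
	// kind-by-kind comparison of their exported fields.
	type point struct{ X, Y int }
	fmt.Println(cmp.Equal(point{1, 2}, point{1, 2})) // true
	fmt.Println(cmp.Equal(point{1, 2}, point{1, 3})) // false
}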
If a struct contains -// unexported fields, Equal panics unless the AllowUnexported option is used or -// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. -// -// Arrays, slices, and maps are equal if they are both nil or both non-nil -// with the same length and the elements at each index or key are equal. -// Note that a non-nil empty slice and a nil slice are not equal. -// To equate empty slices and maps, consider using cmpopts.EquateEmpty. -// Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. -func Equal(x, y interface{}, opts ...Option) bool { - s := newState(opts) - s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. The output string will use the "-" symbol to -// indicate elements removed from x, and the "+" symbol to indicate elements -// added to y. -// -// Do not depend on this output being stable. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - opts = Options{Options(opts), r} - eq := Equal(x, y, opts...) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d -} - -type state struct { - // These fields represent the "comparison state". - // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - reporter reporter // Optional reporter used for difference formatting - - // dynChecker triggers pseudo-random checks for option correctness. - // It is safe for statelessCompare to mutate this value. - dynChecker dynChecker - - // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options -} - -func newState(opts []Option) *state { - s := new(state) - for _, opt := range opts { - s.processOption(opt) - } - return s -} - -func (s *state) processOption(opt Option) { - switch opt := opt.(type) { - case nil: - case Options: - for _, o := range opt { - s.processOption(o) - } - case coreOption: - type filtered interface { - isFiltered() bool - } - if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { - panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) - } - s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } - case reporter: - if s.reporter != nil { - panic("difference reporter already registered") - } - s.reporter = opt - default: - panic(fmt.Sprintf("unknown option %T", opt)) - } -} - -// statelessCompare compares two values and returns the result. -// This function is stateless in that it does not alter the current result, -// or output to any registered reporters. -func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from - // when calling this function to when returning from it. 
- - oldResult, oldReporter := s.result, s.reporter - s.result = diff.Result{} // Reset result - s.reporter = nil // Remove reporter to avoid spurious printouts - s.compareAny(vx, vy) - res := s.result - s.result, s.reporter = oldResult, oldReporter - return res -} - -func (s *state) compareAny(vx, vy reflect.Value) { - // TODO: Support cyclic data structures. - - // Rule 0: Differing types are never equal. - if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), vx, vy) - return - } - if vx.Type() != vy.Type() { - s.report(false, vx, vy) // Possible for path to be empty - return - } - t := vx.Type() - if len(s.curPath) == 0 { - s.curPath.push(&pathStep{typ: t}) - defer s.curPath.pop() - } - vx, vy = s.tryExporting(vx, vy) - - // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(vx, vy, t) { - return - } - - // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(vx, vy, t) { - return - } - - // Rule 3: Recursively descend into each value's underlying kind. - switch t.Kind() { - case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), vx, vy) - return - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), vx, vy) - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), vx, vy) - return - case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), vx, vy) - return - case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), vx, vy) - return - case reflect.String: - s.report(vx.String() == vy.String(), vx, vy) - return - case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), vx, vy) - return - case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - case reflect.Ptr: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - s.curPath.push(&indirect{pathStep{t.Elem()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Interface: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - if vx.Elem().Type() != vy.Elem().Type() { - s.report(false, vx.Elem(), vy.Elem()) - return - } - s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Slice: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - fallthrough - case reflect.Array: - s.compareArray(vx, vy, t) - return - case reflect.Map: - s.compareMap(vx, vy, t) - return - case reflect.Struct: - s.compareStruct(vx, vy, t) - return - default: - panic(fmt.Sprintf("%v kind not handled", t.Kind())) - } -} - -func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { - if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { - if sf.force { - // Use unsafe pointer arithmetic to get read-write access to an - // unexported field in the struct. - vx = unsafeRetrieveField(sf.pvx, sf.field) - vy = unsafeRetrieveField(sf.pvy, sf.field) - } else { - // We are not allowed to export the value, so invalidate them - // so that tryOptions can panic later if not explicitly ignored. 
- vx = nothing - vy = nothing - } - } - return vx, vy -} - -func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { - // If there were no FilterValues, we will not detect invalid inputs, - // so manually check for them and append invalid if necessary. - // We still evaluate the options since an ignore can override invalid. - opts := s.opts - if !vx.IsValid() || !vy.IsValid() { - opts = Options{opts, invalid{}} - } - - // Evaluate all filters and apply the remaining options. - if opt := opts.filter(s, vx, vy, t); opt != nil { - opt.apply(s, vx, vy) - return true - } - return false -} - -func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { - // Check if this type even has an Equal method. - m, ok := t.MethodByName("Equal") - if !ok || !function.IsType(m.Type, function.EqualAssignable) { - return false - } - - eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, vx, vy) - return true -} - -func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{v})[0] - } - - // Run the function twice and ensure that we get the same results back. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, v) - want := f.Call([]reflect.Value{v})[0] - if got := <-c; !s.statelessCompare(got, want).Equal() { - // To avoid false-positives with non-reflexive equality operations, - // we sanity check whether a value is equal to itself. - if !s.statelessCompare(want, want).Equal() { - return want - } - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) - } - return want -} - -func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{x, y})[0].Bool() - } - - // Swapping the input arguments is sufficient to check that - // f is symmetric and deterministic. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, y, x) - want := f.Call([]reflect.Value{x, y})[0].Bool() - if got := <-c; !got.IsValid() || got.Bool() != want { - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) - } - return want -} - -func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { - var ret reflect.Value - defer func() { - recover() // Ignore panics, let the other call to f panic instead - c <- ret - }() - ret = f.Call(vs)[0] -} - -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Remove this hacky workaround. - // See https://golang.org/issue/22143 - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - return v -} - -func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { - step := &sliceIndex{pathStep{t.Elem()}, 0, 0} - s.curPath.push(step) - - // Compute an edit-script for slices vx and vy. 
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - step.xkey, step.ykey = ix, iy - return s.statelessCompare(vx.Index(ix), vy.Index(iy)) - }) - - // Report the entire slice as is if the arrays are of primitive kind, - // and the arrays are different enough. - isPrimitive := false - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - isPrimitive = true - } - if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 { - s.curPath.pop() // Pop first since we are reporting the whole slice - s.report(false, vx, vy) - return - } - - // Replay the edit-script. - var ix, iy int - for _, e := range es { - switch e { - case diff.UniqueX: - step.xkey, step.ykey = ix, -1 - s.report(false, vx.Index(ix), nothing) - ix++ - case diff.UniqueY: - step.xkey, step.ykey = -1, iy - s.report(false, nothing, vy.Index(iy)) - iy++ - default: - step.xkey, step.ykey = ix, iy - if e == diff.Identity { - s.report(true, vx.Index(ix), vy.Index(iy)) - } else { - s.compareAny(vx.Index(ix), vy.Index(iy)) - } - ix++ - iy++ - } - } - s.curPath.pop() - return -} - -func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - - // We combine and sort the two map keys so that we can perform the - // comparisons in a deterministic order. - step := &mapIndex{pathStep: pathStep{t.Elem()}} - s.curPath.push(step) - defer s.curPath.pop() - for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { - step.key = k - vvx := vx.MapIndex(k) - vvy := vy.MapIndex(k) - switch { - case vvx.IsValid() && vvy.IsValid(): - s.compareAny(vvx, vvy) - case vvx.IsValid() && !vvy.IsValid(): - s.report(false, vvx, nothing) - case !vvx.IsValid() && vvy.IsValid(): - s.report(false, nothing, vvy) - default: - // It is possible for both vvx and vvy to be invalid if the - // key contained a NaN value in it. There is no way in - // reflection to be able to retrieve these values. - // See https://golang.org/issue/11104 - panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) - } - } -} - -func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { - var vax, vay reflect.Value // Addressable versions of vx and vy - - step := &structField{} - s.curPath.push(step) - defer s.curPath.pop() - for i := 0; i < t.NumField(); i++ { - vvx := vx.Field(i) - vvy := vy.Field(i) - step.typ = t.Field(i).Type - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For unsafeRetrieveField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. - vax = makeAddressable(vx) - vay = makeAddressable(vy) - } - step.force = s.exporters[t] - step.pvx = vax - step.pvy = vay - step.field = t.Field(i) - } - s.compareAny(vvx, vvy) - } -} - -// report records the result of a single comparison. -// It also calls Report if any reporter is registered. 
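A sketch of how unexported struct fields are handled by the options mentioned above; the counter type is hypothetical and exists only to show the two choices:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Hypothetical type with an unexported field, for illustration only.
type counter struct {
	Name string
	hits int
}

func main() {
	x := counter{Name: "requests", hits: 3}
	y := counter{Name: "requests", hits: 4}

	// cmp.Equal(x, y) with no options would panic here: unexported fields are
	// refused unless they are explicitly ignored or explicitly exported.

	// Option 1: ignore unexported fields of counter entirely.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreUnexported(counter{}))) // true

	// Option 2: force comparison of unexported fields via AllowUnexported,
	// which corresponds to the "force" flag set from s.exporters in
	// compareStruct above.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(counter{}))) // false
}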
-func (s *state) report(eq bool, vx, vy reflect.Value) { - if eq { - s.result.NSame++ - } else { - s.result.NDiff++ - } - if s.reporter != nil { - s.reporter.Report(vx, vy, eq, s.curPath) - } -} - -// dynChecker tracks the state needed to periodically perform checks that -// user provided functions are symmetric and deterministic. -// The zero value is safe for immediate use. -type dynChecker struct{ curr, next int } - -// Next increments the state and reports whether a check should be performed. -// -// Checks occur every Nth function call, where N is a triangular number: -// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... -// See https://en.wikipedia.org/wiki/Triangular_number -// -// This sequence ensures that the cost of checks drops significantly as -// the number of functions calls grows larger. -func (dc *dynChecker) Next() bool { - ok := dc.curr == dc.next - if ok { - dc.curr = 0 - dc.next++ - } - dc.curr++ - return ok -} - -// makeAddressable returns a value that is always addressable. -// It returns the input verbatim if it is already addressable, -// otherwise it creates a new value and returns an addressable copy. -func makeAddressable(v reflect.Value) reflect.Value { - if v.CanAddr() { - return v - } - vc := reflect.New(v.Type()).Elem() - vc.Set(v) - return vc -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go deleted file mode 100644 index 42afa4960efa..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !debug - -package diff - -var debug debugger - -type debugger struct{} - -func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { - return f -} -func (debugger) Update() {} -func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go deleted file mode 100644 index fd9f7f177399..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
- -// +build debug - -package diff - -import ( - "fmt" - "strings" - "sync" - "time" -) - -// The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=debug -v -// -// Example output: -// === RUN TestDifference/#34 -// ┌───────────────────────────────┐ -// │ \ · · · · · · · · · · · · · · │ -// │ · # · · · · · · · · · · · · · │ -// │ · \ · · · · · · · · · · · · · │ -// │ · · \ · · · · · · · · · · · · │ -// │ · · · X # · · · · · · · · · · │ -// │ · · · # \ · · · · · · · · · · │ -// │ · · · · · # # · · · · · · · · │ -// │ · · · · · # \ · · · · · · · · │ -// │ · · · · · · · \ · · · · · · · │ -// │ · · · · · · · · \ · · · · · · │ -// │ · · · · · · · · · \ · · · · · │ -// │ · · · · · · · · · · \ · · # · │ -// │ · · · · · · · · · · · \ # # · │ -// │ · · · · · · · · · · · # # # · │ -// │ · · · · · · · · · · # # # # · │ -// │ · · · · · · · · · # # # # # · │ -// │ · · · · · · · · · · · · · · \ │ -// └───────────────────────────────┘ -// [.Y..M.XY......YXYXY.|] -// -// The grid represents the edit-graph where the horizontal axis represents -// list X and the vertical axis represents list Y. The start of the two lists -// is the top-left, while the ends are the bottom-right. The '·' represents -// an unexplored node in the graph. The '\' indicates that the two symbols -// from list X and Y are equal. The 'X' indicates that two symbols are similar -// (but not exactly equal) to each other. The '#' indicates that the two symbols -// are different (and not similar). The algorithm traverses this graph trying to -// make the paths starting in the top-left and the bottom-right connect. -// -// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents -// the currently established path from the forward and reverse searches, -// separated by a '|' character. - -const ( - updateDelay = 100 * time.Millisecond - finishDelay = 500 * time.Millisecond - ansiTerminal = true // ANSI escape codes used to move terminal cursor -) - -var debug debugger - -type debugger struct { - sync.Mutex - p1, p2 EditScript - fwdPath, revPath *EditScript - grid []byte - lines int -} - -func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { - dbg.Lock() - dbg.fwdPath, dbg.revPath = p1, p2 - top := "┌─" + strings.Repeat("──", nx) + "┐\n" - row := "│ " + strings.Repeat("· ", nx) + "│\n" - btm := "└─" + strings.Repeat("──", nx) + "┘\n" - dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) - dbg.lines = strings.Count(dbg.String(), "\n") - fmt.Print(dbg) - - // Wrap the EqualFunc so that we can intercept each result. 
- return func(ix, iy int) (r Result) { - cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] - for i := range cell { - cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot - } - switch r = f(ix, iy); { - case r.Equal(): - cell[0] = '\\' - case r.Similar(): - cell[0] = 'X' - default: - cell[0] = '#' - } - return - } -} - -func (dbg *debugger) Update() { - dbg.print(updateDelay) -} - -func (dbg *debugger) Finish() { - dbg.print(finishDelay) - dbg.Unlock() -} - -func (dbg *debugger) String() string { - dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] - for i := len(*dbg.revPath) - 1; i >= 0; i-- { - dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) - } - return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) -} - -func (dbg *debugger) print(d time.Duration) { - if ansiTerminal { - fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor - } - fmt.Print(dbg) - time.Sleep(d) -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go deleted file mode 100644 index 260befea2fd7..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package diff implements an algorithm for producing edit-scripts. -// The edit-script is a sequence of operations needed to transform one list -// of symbols into another (or vice-versa). The edits allowed are insertions, -// deletions, and modifications. The summation of all edits is called the -// Levenshtein distance as this problem is well-known in computer science. -// -// This package prioritizes performance over accuracy. That is, the run time -// is more important than obtaining a minimal Levenshtein distance. -package diff - -// EditType represents a single operation within an edit-script. -type EditType uint8 - -const ( - // Identity indicates that a symbol pair is identical in both list X and Y. - Identity EditType = iota - // UniqueX indicates that a symbol only exists in X and not Y. - UniqueX - // UniqueY indicates that a symbol only exists in Y and not X. - UniqueY - // Modified indicates that a symbol pair is a modification of each other. - Modified -) - -// EditScript represents the series of differences between two lists. -type EditScript []EditType - -// String returns a human-readable string representing the edit-script where -// Identity, UniqueX, UniqueY, and Modified are represented by the -// '.', 'X', 'Y', and 'M' characters, respectively. -func (es EditScript) String() string { - b := make([]byte, len(es)) - for i, e := range es { - switch e { - case Identity: - b[i] = '.' - case UniqueX: - b[i] = 'X' - case UniqueY: - b[i] = 'Y' - case Modified: - b[i] = 'M' - default: - panic("invalid edit-type") - } - } - return string(b) -} - -// stats returns a histogram of the number of each type of edit operation. -func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { - for _, e := range es { - switch e { - case Identity: - s.NI++ - case UniqueX: - s.NX++ - case UniqueY: - s.NY++ - case Modified: - s.NM++ - default: - panic("invalid edit-type") - } - } - return -} - -// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if -// lists X and Y are equal. -func (es EditScript) Dist() int { return len(es) - es.stats().NI } - -// LenX is the length of the X list. 
-func (es EditScript) LenX() int { return len(es) - es.stats().NY } - -// LenY is the length of the Y list. -func (es EditScript) LenY() int { return len(es) - es.stats().NX } - -// EqualFunc reports whether the symbols at indexes ix and iy are equal. -// When called by Difference, the index is guaranteed to be within nx and ny. -type EqualFunc func(ix int, iy int) Result - -// Result is the result of comparison. -// NSame is the number of sub-elements that are equal. -// NDiff is the number of sub-elements that are not equal. -type Result struct{ NSame, NDiff int } - -// Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NDiff == 0 } - -// Similar indicates whether two symbols are similar and may be represented -// by using the Modified type. As a special case, we consider binary comparisons -// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. -// -// The exact ratio of NSame to NDiff to determine similarity may change. -func (r Result) Similar() bool { - // Use NSame+1 to offset NSame so that binary comparisons are similar. - return r.NSame+1 >= r.NDiff -} - -// Difference reports whether two lists of lengths nx and ny are equal -// given the definition of equality provided as f. -// -// This function returns an edit-script, which is a sequence of operations -// needed to convert one list into the other. The following invariants for -// the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() -// -// This algorithm is not guaranteed to be an optimal solution (i.e., one that -// produces an edit-script with a minimal Levenshtein distance). This algorithm -// favors performance over optimality. The exact output is not guaranteed to -// be stable and may change over time. -func Difference(nx, ny int, f EqualFunc) (es EditScript) { - // This algorithm is based on traversing what is known as an "edit-graph". - // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" - // by Eugene W. Myers. Since D can be as large as N itself, this is - // effectively O(N^2). Unlike the algorithm from that paper, we are not - // interested in the optimal path, but at least some "decent" path. - // - // For example, let X and Y be lists of symbols: - // X = [A B C A B B A] - // Y = [C B A B A C] - // - // The edit-graph can be drawn as the following: - // A B C A B B A - // ┌─────────────┐ - // C │_|_|\|_|_|_|_│ 0 - // B │_|\|_|_|\|\|_│ 1 - // A │\|_|_|\|_|_|\│ 2 - // B │_|\|_|_|\|\|_│ 3 - // A │\|_|_|\|_|_|\│ 4 - // C │ | |\| | | | │ 5 - // └─────────────┘ 6 - // 0 1 2 3 4 5 6 7 - // - // List X is written along the horizontal axis, while list Y is written - // along the vertical axis. At any point on this grid, if the symbol in - // list X matches the corresponding symbol in list Y, then a '\' is drawn. - // The goal of any minimal edit-script algorithm is to find a path from the - // top-left corner to the bottom-right corner, while traveling through the - // fewest horizontal or vertical edges. - // A horizontal edge is equivalent to inserting a symbol from list X. - // A vertical edge is equivalent to inserting a symbol from list Y. - // A diagonal edge is equivalent to a matching symbol between both X and Y. 
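
The Similar heuristic above is deliberately loose: NSame is offset by one so that a purely binary comparison (Result{1, 0} or Result{0, 1}) still counts as similar and can be reported as a Modified pair. A tiny sketch of that rule:

    package main

    import "fmt"

    type Result struct{ NSame, NDiff int }

    // similar mirrors Result.Similar: offset NSame by one so that binary
    // comparisons (a single sub-element, equal or not) are treated as similar.
    func similar(r Result) bool { return r.NSame+1 >= r.NDiff }

    func main() {
        fmt.Println(similar(Result{NSame: 0, NDiff: 1})) // true: binary mismatch
        fmt.Println(similar(Result{NSame: 1, NDiff: 0})) // true: binary match
        fmt.Println(similar(Result{NSame: 1, NDiff: 3})) // false: mostly different
    }
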
- - // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny - // - // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y - // Unless, it is time for the algorithm to terminate. - fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} - revPath := path{-1, point{nx, ny}, make(EditScript, 0)} - fwdFrontier := fwdPath.point // Forward search frontier - revFrontier := revPath.point // Reverse search frontier - - // Search budget bounds the cost of searching for better paths. - // The longest sequence of non-matching symbols that can be tolerated is - // approximately the square-root of the search budget. - searchBudget := 4 * (nx + ny) // O(n) - - // The algorithm below is a greedy, meet-in-the-middle algorithm for - // computing sub-optimal edit-scripts between two lists. - // - // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. - // - // This algorithm is correct even if searching only in the forward direction - // or in the reverse direction. We do both because it is commonly observed - // that two lists commonly differ because elements were added to the front - // or end of the other list. - // - // Running the tests with the "debug" build tag prints a visualization of - // the algorithm running in real-time. This is educational for understanding - // how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - for { - // Forward search from the beginning. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{fwdFrontier.X + z, fwdFrontier.Y - z} - switch { - case p.X >= revPath.X || p.Y < fwdPath.Y: - stop1 = true // Hit top-right corner - case p.Y >= revPath.Y || p.X < fwdPath.X: - stop2 = true // Hit bottom-left corner - case f(p.X, p.Y).Equal(): - // Match found, so connect the path to this point. - fwdPath.connect(p, f) - fwdPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(fwdPath.X, fwdPath.Y).Equal() { - break - } - fwdPath.append(Identity) - } - fwdFrontier = fwdPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards reverse point. 
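
When a pass finds no match, the forward frontier is nudged toward the reverse path along whichever axis has the most distance left, which keeps the two searches roughly on a diagonal. A stand-alone sketch of that advance rule (with a locally declared point type standing in for the unexported one):

    package main

    import "fmt"

    type point struct{ X, Y int }

    // advanceForward steps the forward frontier toward revPath, preferring
    // the axis with the larger remaining distance, as in the loop above.
    func advanceForward(fwdFrontier, revPath point) point {
        if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
            fwdFrontier.X++
        } else {
            fwdFrontier.Y++
        }
        return fwdFrontier
    }

    func main() {
        fmt.Println(advanceForward(point{0, 0}, point{7, 6})) // {1 0}: X has more ground left
        fmt.Println(advanceForward(point{3, 0}, point{7, 6})) // {3 1}: now Y does
    }
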
- if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { - fwdFrontier.X++ - } else { - fwdFrontier.Y++ - } - - // Reverse search from the end. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{revFrontier.X - z, revFrontier.Y + z} - switch { - case fwdPath.X >= p.X || revPath.Y < p.Y: - stop1 = true // Hit bottom-left corner - case fwdPath.Y >= p.Y || revPath.X < p.X: - stop2 = true // Hit top-right corner - case f(p.X-1, p.Y-1).Equal(): - // Match found, so connect the path to this point. - revPath.connect(p, f) - revPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(revPath.X-1, revPath.Y-1).Equal() { - break - } - revPath.append(Identity) - } - revFrontier = revPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards forward point. - if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { - revFrontier.X-- - } else { - revFrontier.Y-- - } - } - - // Join the forward and reverse paths and then append the reverse path. - fwdPath.connect(revPath.point, f) - for i := len(revPath.es) - 1; i >= 0; i-- { - t := revPath.es[i] - revPath.es = revPath.es[:i] - fwdPath.append(t) - } - debug.Finish() - return fwdPath.es -} - -type path struct { - dir int // +1 if forward, -1 if reverse - point // Leading point of the EditScript path - es EditScript -} - -// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types -// to the edit-script to connect p.point to dst. -func (p *path) connect(dst point, f EqualFunc) { - if p.dir > 0 { - // Connect in forward direction. - for dst.X > p.X && dst.Y > p.Y { - switch r := f(p.X, p.Y); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case dst.X-p.X >= dst.Y-p.Y: - p.append(UniqueX) - default: - p.append(UniqueY) - } - } - for dst.X > p.X { - p.append(UniqueX) - } - for dst.Y > p.Y { - p.append(UniqueY) - } - } else { - // Connect in reverse direction. - for p.X > dst.X && p.Y > dst.Y { - switch r := f(p.X-1, p.Y-1); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case p.Y-dst.Y >= p.X-dst.X: - p.append(UniqueY) - default: - p.append(UniqueX) - } - } - for p.X > dst.X { - p.append(UniqueX) - } - for p.Y > dst.Y { - p.append(UniqueY) - } - } -} - -func (p *path) append(t EditType) { - p.es = append(p.es, t) - switch t { - case Identity, Modified: - p.add(p.dir, p.dir) - case UniqueX: - p.add(p.dir, 0) - case UniqueY: - p.add(0, p.dir) - } - debug.Update() -} - -type point struct{ X, Y int } - -func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } - -// zigzag maps a consecutive sequence of integers to a zig-zag sequence. -// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] -func zigzag(x int) int { - if x&1 != 0 { - x = ^x - } - return x >> 1 -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go deleted file mode 100644 index 4c35ff11ee13..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. 
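
The diagonal probing above relies on zigzag, which turns 0, 1, 2, 3, 4, ... into 0, -1, +1, -2, +2, ... so that the search alternates on either side of the frontier. A quick check of that mapping:

    package main

    import "fmt"

    // zigzag maps 0 1 2 3 4 5 ... to 0 -1 +1 -2 +2 -3 ..., exactly as in
    // diff.go: odd inputs are bit-complemented before the arithmetic shift.
    func zigzag(x int) int {
        if x&1 != 0 {
            x = ^x
        }
        return x >> 1
    }

    func main() {
        for i := 0; i < 6; i++ {
            fmt.Print(zigzag(i), " ") // 0 -1 1 -2 2 -3
        }
        fmt.Println()
    }
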
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package function identifies function types. -package function - -import "reflect" - -type funcType int - -const ( - _ funcType = iota - - ttbFunc // func(T, T) bool - tibFunc // func(T, I) bool - trFunc // func(T) R - - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool -) - -var boolType = reflect.TypeOf(true) - -// IsType reports whether the reflect.Type is of the specified function type. -func IsType(t reflect.Type, ft funcType) bool { - if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { - return false - } - ni, no := t.NumIn(), t.NumOut() - switch ft { - case ttbFunc: // func(T, T) bool - if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { - return true - } - case tibFunc: // func(T, I) bool - if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { - return true - } - case trFunc: // func(T) R - if ni == 1 && no == 1 { - return true - } - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go deleted file mode 100644 index 657e508779db..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package value provides functionality for reflect.Value types. -package value - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "unicode" -) - -var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - -// Format formats the value v as a string. -// -// This is similar to fmt.Sprintf("%+v", v) except this: -// * Prints the type unless it can be elided -// * Avoids printing struct fields that are zero -// * Prints a nil-slice as being nil, not empty -// * Prints map entries in deterministic order -func Format(v reflect.Value, conf FormatConfig) string { - conf.printType = true - conf.followPointers = true - conf.realPointers = true - return formatAny(v, conf, nil) -} - -type FormatConfig struct { - UseStringer bool // Should the String method be used if available? - printType bool // Should we print the type before the value? - PrintPrimitiveType bool // Should we print the type of primitives? - followPointers bool // Should we recursively follow pointers? - realPointers bool // Should we print the real address of pointers? -} - -func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string { - // TODO: Should this be a multi-line printout in certain situations? 
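
The checks performed by IsType are plain reflect inspections: arity, variadicity, and the shapes of the input and output types. A sketch of the "func(T, T) bool" case written directly against the standard reflect API rather than the internal package:

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    // isTTB reports whether f has the shape func(T, T) bool for some T,
    // the same test the ttbFunc case performs above.
    func isTTB(f interface{}) bool {
        t := reflect.TypeOf(f)
        if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
            return false
        }
        return t.NumIn() == 2 && t.NumOut() == 1 &&
            t.In(0) == t.In(1) && t.Out(0) == reflect.TypeOf(true)
    }

    func main() {
        fmt.Println(isTTB(strings.EqualFold)) // true: func(string, string) bool
        fmt.Println(isTTB(func(x int, y string) bool { return false })) // false: inputs differ
    }
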
- - if !v.IsValid() { - return "" - } - if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() { - if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { - return "" - } - - const stringerPrefix = "s" // Indicates that the String method was used - s := v.Interface().(fmt.Stringer).String() - return stringerPrefix + formatString(s) - } - - switch v.Kind() { - case reflect.Bool: - return formatPrimitive(v.Type(), v.Bool(), conf) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return formatPrimitive(v.Type(), v.Int(), conf) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr { - // Unnamed uints are usually bytes or words, so use hexadecimal. - return formatPrimitive(v.Type(), formatHex(v.Uint()), conf) - } - return formatPrimitive(v.Type(), v.Uint(), conf) - case reflect.Float32, reflect.Float64: - return formatPrimitive(v.Type(), v.Float(), conf) - case reflect.Complex64, reflect.Complex128: - return formatPrimitive(v.Type(), v.Complex(), conf) - case reflect.String: - return formatPrimitive(v.Type(), formatString(v.String()), conf) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return formatPointer(v, conf) - case reflect.Ptr: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("(%v)(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] || !conf.followPointers { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - return "&" + formatAny(v.Elem(), conf, visited) - case reflect.Interface: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - return formatAny(v.Elem(), conf, visited) - case reflect.Slice: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - fallthrough - case reflect.Array: - var ss []string - subConf := conf - subConf.printType = v.Type().Elem().Kind() == reflect.Interface - for i := 0; i < v.Len(); i++ { - s := formatAny(v.Index(i), subConf, visited) - ss = append(ss, s) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Map: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - - var ss []string - keyConf, valConf := conf, conf - keyConf.printType = v.Type().Key().Kind() == reflect.Interface - keyConf.followPointers = false - valConf.printType = v.Type().Elem().Kind() == reflect.Interface - for _, k := range SortKeys(v.MapKeys()) { - sk := formatAny(k, keyConf, visited) - sv := formatAny(v.MapIndex(k), valConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", sk, sv)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Struct: - var ss []string - subConf := conf - subConf.printType = true - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if isZero(vv) { - continue // Elide zero value fields - } - name := v.Type().Field(i).Name - subConf.UseStringer = conf.UseStringer - s := formatAny(vv, subConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", name, s)) - } - s := 
fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. - rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !unicode.IsPrint(r) - } - if strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string { - if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") { - return fmt.Sprintf("%v(%v)", t, v) - } - return fmt.Sprintf("%v", v) -} - -func formatPointer(v reflect.Value, conf FormatConfig) string { - p := v.Pointer() - if !conf.realPointers { - p = 0 // For deterministic printing purposes - } - s := formatHex(uint64(p)) - if conf.printType { - return fmt.Sprintf("(%v)(%s)", v.Type(), s) - } - return s -} - -func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} - -// insertPointer insert p into m, allocating m if necessary. -func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool { - if m == nil { - m = make(map[uintptr]bool) - } - m[p] = true - return m -} - -// isZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go deleted file mode 100644 index fe8aa27a077b..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package value - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// SortKeys sorts a list of map keys, deduplicating keys if necessary. 
-// The type of each value must be comparable. -func SortKeys(vs []reflect.Value) []reflect.Value { - if len(vs) == 0 { - return vs - } - - // Sort the map keys. - sort.Sort(valueSorter(vs)) - - // Deduplicate keys (fails for NaNs). - vs2 := vs[:1] - for _, v := range vs[1:] { - if isLess(vs2[len(vs2)-1], v) { - vs2 = append(vs2, v) - } - } - return vs2 -} - -// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above. -type valueSorter []reflect.Value - -func (vs valueSorter) Len() int { return len(vs) } -func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } -func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } - -// isLess is a generic function for sorting arbitrary map keys. -// The inputs must be of the same type and must be comparable. -func isLess(x, y reflect.Value) bool { - switch x.Type().Kind() { - case reflect.Bool: - return !x.Bool() && y.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return x.Int() < y.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return x.Uint() < y.Uint() - case reflect.Float32, reflect.Float64: - fx, fy := x.Float(), y.Float() - return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) - case reflect.Complex64, reflect.Complex128: - cx, cy := x.Complex(), y.Complex() - rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) - if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { - return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) - } - return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) - case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: - return x.Pointer() < y.Pointer() - case reflect.String: - return x.String() < y.String() - case reflect.Array: - for i := 0; i < x.Len(); i++ { - if isLess(x.Index(i), y.Index(i)) { - return true - } - if isLess(y.Index(i), x.Index(i)) { - return false - } - } - return false - case reflect.Struct: - for i := 0; i < x.NumField(); i++ { - if isLess(x.Field(i), y.Field(i)) { - return true - } - if isLess(y.Field(i), x.Field(i)) { - return false - } - } - return false - case reflect.Interface: - vx, vy := x.Elem(), y.Elem() - if !vx.IsValid() || !vy.IsValid() { - return !vx.IsValid() && vy.IsValid() - } - tx, ty := vx.Type(), vy.Type() - if tx == ty { - return isLess(x.Elem(), y.Elem()) - } - if tx.Kind() != ty.Kind() { - return vx.Kind() < vy.Kind() - } - if tx.String() != ty.String() { - return tx.String() < ty.String() - } - if tx.PkgPath() != ty.PkgPath() { - return tx.PkgPath() < ty.PkgPath() - } - // This can happen in rare situations, so we fallback to just comparing - // the unique pointer for a reflect.Type. This guarantees deterministic - // ordering within a program, but it is obviously not stable. - return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() - default: - // Must be Func, Map, or Slice; which are not comparable. - panic(fmt.Sprintf("%T is not comparable", x.Type())) - } -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go deleted file mode 100644 index 91d4b066e052..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
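
SortKeys is what gives cmp its deterministic map output: the reflected keys are ordered with a type-aware isLess and then deduplicated. A simplified sketch for the common case of a single key kind, using sort.Slice from the standard library instead of the custom valueSorter:

    package main

    import (
        "fmt"
        "reflect"
        "sort"
    )

    // sortStringKeys orders the reflected keys of a string-keyed map, a
    // narrow slice of what SortKeys/isLess do for arbitrary key types.
    func sortStringKeys(m interface{}) []reflect.Value {
        keys := reflect.ValueOf(m).MapKeys()
        sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() })
        return keys
    }

    func main() {
        m := map[string]int{"b": 2, "a": 1, "c": 3}
        for _, k := range sortStringKeys(m) {
            fmt.Print(k.String(), " ") // a b c
        }
        fmt.Println()
    }
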
- -package cmp - -import ( - "fmt" - "reflect" - "runtime" - "strings" - - "github.com/google/go-cmp/cmp/internal/function" -) - -// Option configures for specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), -// configure how equality is determined. -// -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. -// -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. -type Option interface { - // filter applies all filters and returns the option that remains. - // Each option may only read s.curPath and call s.callTTBFunc. - // - // An Options is returned only if multiple comparers or transformers - // can apply simultaneously and will only contain values of those types - // or sub-Options containing values of those types. - filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption -} - -// applicableOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer -// Grouping: Options -type applicableOption interface { - Option - - // apply executes the option, which may mutate s or panic. - apply(s *state, vx, vy reflect.Value) -} - -// coreOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer -// Filters: *pathFilter | *valuesFilter -type coreOption interface { - Option - isCore() -} - -type core struct{} - -func (core) isCore() {} - -// Options is a list of Option values that also satisfies the Option interface. -// Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, -// it will be implicitly expanded into a flat list. -// -// Applying a filter on an Options is equivalent to applying that same filter -// on all individual options held within. -type Options []Option - -func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { - for _, opt := range opts { - switch opt := opt.filter(s, vx, vy, t); opt.(type) { - case ignore: - return ignore{} // Only ignore can short-circuit evaluation - case invalid: - out = invalid{} // Takes precedence over comparer or transformer - case *comparer, *transformer, Options: - switch out.(type) { - case nil: - out = opt - case invalid: - // Keep invalid - case *comparer, *transformer, Options: - out = Options{out, opt} // Conflicting comparers or transformers - } - } - } - return out -} - -func (opts Options) apply(s *state, _, _ reflect.Value) { - const warning = "ambiguous set of applicable options" - const help = "consider using filters to ensure at most one Comparer or Transformer may apply" - var ss []string - for _, opt := range flattenOptions(nil, opts) { - ss = append(ss, fmt.Sprint(opt)) - } - set := strings.Join(ss, "\n\t") - panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) -} - -func (opts Options) String() string { - var ss []string - for _, opt := range opts { - ss = append(ss, fmt.Sprint(opt)) - } - return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) -} - -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. 
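
In the exported API, FilterPath is the usual way to scope another option to part of the value tree, for example pairing it with Ignore to skip a single field during Equal. A sketch against the public cmp package (the Event type and Seen field are illustrative, not from this repository):

    package main

    import (
        "fmt"
        "time"

        "github.com/google/go-cmp/cmp"
    )

    type Event struct {
        Name string
        Seen time.Time
    }

    func main() {
        // Ignore any step that resolves to a field named Seen; the filter
        // receives the current Path and decides whether Ignore applies.
        ignoreSeen := cmp.FilterPath(func(p cmp.Path) bool {
            return p.Last().String() == ".Seen"
        }, cmp.Ignore())

        a := Event{Name: "boot", Seen: time.Now()}
        b := Event{Name: "boot", Seen: a.Seen.Add(time.Hour)}
        fmt.Println(cmp.Equal(a, b, ignoreSeen)) // true: only Seen differs
    }
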
-func FilterPath(f func(Path) bool, opt Option) Option { - if f == nil { - panic("invalid path filter function") - } - if opt := normalizeOption(opt); opt != nil { - return &pathFilter{fnc: f, opt: opt} - } - return nil -} - -type pathFilter struct { - core - fnc func(Path) bool - opt Option -} - -func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if f.fnc(s.curPath) { - return f.opt.filter(s, vx, vy, t) - } - return nil -} - -func (f pathFilter) String() string { - fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) - return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) -} - -// FilterValues returns a new Option where opt is only evaluated if filter f, -// which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If the type of the values is not -// assignable to T, then this filter implicitly returns false. -// -// The filter function must be -// symmetric (i.e., agnostic to the order of the inputs) and -// deterministic (i.e., produces the same result when given the same inputs). -// If T is an interface, it is possible that f is called with two values with -// different concrete types that both implement T. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. -func FilterValues(f interface{}, opt Option) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { - panic(fmt.Sprintf("invalid values filter function: %T", f)) - } - if opt := normalizeOption(opt); opt != nil { - vf := &valuesFilter{fnc: v, opt: opt} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - vf.typ = ti - } - return vf - } - return nil -} - -type valuesFilter struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool - opt Option -} - -func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return invalid{} - } - if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, vx, vy, t) - } - return nil -} - -func (f valuesFilter) String() string { - fn := getFuncName(f.fnc.Pointer()) - return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) -} - -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. -func Ignore() Option { return ignore{} } - -type ignore struct{ core } - -func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } -func (ignore) apply(_ *state, _, _ reflect.Value) { return } -func (ignore) String() string { return "Ignore()" } - -// invalid is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields. -type invalid struct{ core } - -func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } -func (invalid) apply(s *state, _, _ reflect.Value) { - const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) -} - -// Transformer returns an Option that applies a transformation function that -// converts values of a certain type into that of another. 
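
FilterValues scopes an option by the concrete values being compared rather than by their position; a typical use is restricting an approximate comparer to pairs of finite floats. A sketch (the tolerance is an arbitrary choice for the example):

    package main

    import (
        "fmt"
        "math"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Apply the approximate comparer only to pairs of non-NaN float64s.
        approx := cmp.FilterValues(func(x, y float64) bool {
            return !math.IsNaN(x) && !math.IsNaN(y)
        }, cmp.Comparer(func(x, y float64) bool {
            return math.Abs(x-y) < 1e-9
        }))

        fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true under the tolerance
        fmt.Println(cmp.Equal(0.1+0.2, 0.3))         // false with exact comparison
    }
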
-// -// The transformer f must be a function "func(T) R" that converts values of -// type T to those of type R and is implicitly filtered to input values -// assignable to T. The transformer must not mutate T in any way. -// -// To help prevent some cases of infinite recursive cycles applying the -// same transform to the output of itself (e.g., in the case where the -// input and output types are the same), an implicit filter is added such that -// a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. -// -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep. If empty, an arbitrary name is used. -func Transformer(name string, f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { - panic(fmt.Sprintf("invalid transformer function: %T", f)) - } - if name == "" { - name = "λ" // Lambda-symbol as place-holder for anonymous transformer - } - if !isValid(name) { - panic(fmt.Sprintf("invalid name: %q", name)) - } - tr := &transformer{name: name, fnc: reflect.ValueOf(f)} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - tr.typ = ti - } - return tr -} - -type transformer struct { - core - name string - typ reflect.Type // T - fnc reflect.Value // func(T) R -} - -func (tr *transformer) isFiltered() bool { return tr.typ != nil } - -func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { - for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(*transform); !ok { - break // Hit most recent non-Transform step - } else if tr == t.trans { - return nil // Cannot directly use same Transform - } - } - if tr.typ == nil || t.AssignableTo(tr.typ) { - return tr - } - return nil -} - -func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - // Update path before calling the Transformer so that dynamic checks - // will use the updated path. - s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) - defer s.curPath.pop() - - vx = s.callTRFunc(tr.fnc, vx) - vy = s.callTRFunc(tr.fnc, vy) - s.compareAny(vx, vy) -} - -func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) -} - -// Comparer returns an Option that determines whether two values are equal -// to each other. -// -// The comparer f must be a function "func(T, T) bool" and is implicitly -// filtered to input values assignable to T. If T is an interface, it is -// possible that f is called with two values of different concrete types that -// both implement T. 
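
A Transformer is typically used to normalize values before comparison, such as treating slices as multisets by sorting a copy first. A sketch along the lines of the example in the cmp documentation:

    package main

    import (
        "fmt"
        "sort"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Sort a copy of the input so []int values compare as multisets.
        // The transformer must not mutate its input, hence the copy.
        sortInts := cmp.Transformer("Sort", func(in []int) []int {
            out := append([]int(nil), in...)
            sort.Ints(out)
            return out
        })

        x := []int{3, 1, 2}
        y := []int{1, 2, 3}
        fmt.Println(cmp.Equal(x, y, sortInts)) // true: same elements, different order
    }
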
-// -// The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y -func Comparer(f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Equal) || v.IsNil() { - panic(fmt.Sprintf("invalid comparer function: %T", f)) - } - cm := &comparer{fnc: v} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - cm.typ = ti - } - return cm -} - -type comparer struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (cm *comparer) isFiltered() bool { return cm.typ != nil } - -func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { - if cm.typ == nil || t.AssignableTo(cm.typ) { - return cm - } - return nil -} - -func (cm *comparer) apply(s *state, vx, vy reflect.Value) { - eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, vx, vy) -} - -func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) -} - -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. -// -// Users of this option must understand that comparing on unexported fields -// from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal -// to unexpectedly change. However, it may be valid to use this option on types -// defined in an internal package where the semantic meaning of an unexported -// field is in the control of the user. -// -// For some cases, a custom Comparer should be used instead that defines -// equality as a function of the public API of a type rather than the underlying -// unexported implementation. -// -// For example, the reflect.Type documentation defines equality to be determined -// by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested -// in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: -// -// Comparer(func(x, y reflect.Type) bool { return x == y }) -// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) -// -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. -func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") - } - m := make(map[reflect.Type]bool) - for _, typ := range types { - t := reflect.TypeOf(typ) - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - m[t] = true - } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { - panic("not implemented") -} - -// reporter is an Option that configures how differences are reported. -type reporter interface { - // TODO: Not exported yet. - // - // Perhaps add PushStep and PopStep and change Report to only accept - // a PathStep instead of the full-path? Adding a PushStep and PopStep makes - // it clear that we are traversing the value tree in a depth-first-search - // manner, which has an effect on how values are printed. 
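
The Comparer options sketched in the comment above drop straight into Equal; for instance, comparing compiled regular expressions by their source text instead of reaching into their unexported fields. A small usage sketch (assuming both values are non-nil):

    package main

    import (
        "fmt"
        "regexp"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Treat two *regexp.Regexp values as equal when their patterns match,
        // avoiding any comparison of their unexported internals.
        byPattern := cmp.Comparer(func(x, y *regexp.Regexp) bool {
            return x.String() == y.String()
        })

        a := regexp.MustCompile(`^v\d+\.\d+$`)
        b := regexp.MustCompile(`^v\d+\.\d+$`)
        fmt.Println(cmp.Equal(a, b, byPattern)) // true
    }
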
- - Option - - // Report is called for every comparison made and will be provided with - // the two values being compared, the equality result, and the - // current path in the value tree. It is possible for x or y to be an - // invalid reflect.Value if one of the values is non-existent; - // which is possible with maps and slices. - Report(x, y reflect.Value, eq bool, p Path) -} - -// normalizeOption normalizes the input options such that all Options groups -// are flattened and groups with a single element are reduced to that element. -// Only coreOptions and Options containing coreOptions are allowed. -func normalizeOption(src Option) Option { - switch opts := flattenOptions(nil, Options{src}); len(opts) { - case 0: - return nil - case 1: - return opts[0] - default: - return opts - } -} - -// flattenOptions copies all options in src to dst as a flat list. -// Only coreOptions and Options containing coreOptions are allowed. -func flattenOptions(dst, src Options) Options { - for _, opt := range src { - switch opt := opt.(type) { - case nil: - continue - case Options: - dst = flattenOptions(dst, opt) - case coreOption: - dst = append(dst, opt) - default: - panic(fmt.Sprintf("invalid option type: %T", opt)) - } - } - return dst -} - -// getFuncName returns a short function name from the pointer. -// The string parsing logic works up until Go1.9. -func getFuncName(p uintptr) string { - fnc := runtime.FuncForPC(p) - if fnc == nil { - return "" - } - name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm" - if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") { - // Strip the package name from method name. - name = strings.TrimSuffix(name, ")-fm") - name = strings.TrimSuffix(name, ")·fm") - if i := strings.LastIndexByte(name, '('); i >= 0 { - methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc" - if j := strings.LastIndexByte(methodName, '.'); j >= 0 { - methodName = methodName[j+1:] // E.g., "myfunc" - } - name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc" - } - } - if i := strings.LastIndexByte(name, '/'); i >= 0 { - // Strip the package name. - name = name[i+1:] // E.g., "mypkg.(mytype).myfunc" - } - return name -} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go deleted file mode 100644 index c08a3cf80d9b..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -type ( - // Path is a list of PathSteps describing the sequence of operations to get - // from some root type to the current position in the value tree. - // The first Path element is always an operation-less PathStep that exists - // simply to identify the initial type. - // - // When traversing structs with embedded structs, the embedded struct will - // always be accessed as a field before traversing the fields of the - // embedded struct themselves. That is, an exported field from the - // embedded struct will never be accessed directly from the parent struct. - Path []PathStep - - // PathStep is a union-type for specific operations to traverse - // a value's tree structure. 
Users of this package never need to implement - // these types as values of this type will be returned by this package. - PathStep interface { - String() string - Type() reflect.Type // Resulting type after performing the path step - isPathStep() - } - - // SliceIndex is an index operation on a slice or array at some index Key. - SliceIndex interface { - PathStep - Key() int // May return -1 if in a split state - - // SplitKeys returns the indexes for indexing into slices in the - // x and y values, respectively. These indexes may differ due to the - // insertion or removal of an element in one of the slices, causing - // all of the indexes to be shifted. If an index is -1, then that - // indicates that the element does not exist in the associated slice. - // - // Key is guaranteed to return -1 if and only if the indexes returned - // by SplitKeys are not the same. SplitKeys will never return -1 for - // both indexes. - SplitKeys() (x int, y int) - - isSliceIndex() - } - // MapIndex is an index operation on a map at some index Key. - MapIndex interface { - PathStep - Key() reflect.Value - isMapIndex() - } - // TypeAssertion represents a type assertion on an interface. - TypeAssertion interface { - PathStep - isTypeAssertion() - } - // StructField represents a struct field access on a field called Name. - StructField interface { - PathStep - Name() string - Index() int - isStructField() - } - // Indirect represents pointer indirection on the parent type. - Indirect interface { - PathStep - isIndirect() - } - // Transform is a transformation from the parent type to the current type. - Transform interface { - PathStep - Name() string - Func() reflect.Value - - // Option returns the originally constructed Transformer option. - // The == operator can be used to detect the exact option used. - Option() Option - - isTransform() - } -) - -func (pa *Path) push(s PathStep) { - *pa = append(*pa, s) -} - -func (pa *Path) pop() { - *pa = (*pa)[:len(*pa)-1] -} - -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. -func (pa Path) Last() PathStep { - return pa.Index(-1) -} - -// Index returns the ith step in the Path and supports negative indexing. -// A negative index starts counting from the tail of the Path such that -1 -// refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. -func (pa Path) Index(i int) PathStep { - if i < 0 { - i = len(pa) + i - } - if i < 0 || i >= len(pa) { - return pathStep{} - } - return pa[i] -} - -// String returns the simplified path to a node. -// The simplified path only contains struct field accesses. -// -// For example: -// MyMap.MySlices.MyField -func (pa Path) String() string { - var ss []string - for _, s := range pa { - if _, ok := s.(*structField); ok { - ss = append(ss, s.String()) - } - } - return strings.TrimPrefix(strings.Join(ss, ""), ".") -} - -// GoString returns the path to a specific node using Go syntax. 
-// -// For example: -// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField -func (pa Path) GoString() string { - var ssPre, ssPost []string - var numIndirect int - for i, s := range pa { - var nextStep PathStep - if i+1 < len(pa) { - nextStep = pa[i+1] - } - switch s := s.(type) { - case *indirect: - numIndirect++ - pPre, pPost := "(", ")" - switch nextStep.(type) { - case *indirect: - continue // Next step is indirection, so let them batch up - case *structField: - numIndirect-- // Automatic indirection on struct fields - case nil: - pPre, pPost = "", "" // Last step; no need for parenthesis - } - if numIndirect > 0 { - ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) - ssPost = append(ssPost, pPost) - } - numIndirect = 0 - continue - case *transform: - ssPre = append(ssPre, s.trans.name+"(") - ssPost = append(ssPost, ")") - continue - case *typeAssertion: - // As a special-case, elide type assertions on anonymous types - // since they are typically generated dynamically and can be very - // verbose. For example, some transforms return interface{} because - // of Go's lack of generics, but typically take in and return the - // exact same concrete type. - if s.Type().PkgPath() == "" { - continue - } - } - ssPost = append(ssPost, s.String()) - } - for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { - ssPre[i], ssPre[j] = ssPre[j], ssPre[i] - } - return strings.Join(ssPre, "") + strings.Join(ssPost, "") -} - -type ( - pathStep struct { - typ reflect.Type - } - - sliceIndex struct { - pathStep - xkey, ykey int - } - mapIndex struct { - pathStep - key reflect.Value - } - typeAssertion struct { - pathStep - } - structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - force bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values - field reflect.StructField // Field information - } - indirect struct { - pathStep - } - transform struct { - pathStep - trans *transformer - } -) - -func (ps pathStep) Type() reflect.Type { return ps.typ } -func (ps pathStep) String() string { - if ps.typ == nil { - return "" - } - s := ps.typ.String() - if s == "" || strings.ContainsAny(s, "{}\n") { - return "root" // Type too simple or complex to print - } - return fmt.Sprintf("{%s}", s) -} - -func (si sliceIndex) String() string { - switch { - case si.xkey == si.ykey: - return fmt.Sprintf("[%d]", si.xkey) - case si.ykey == -1: - // [5->?] 
means "I don't know where X[5] went" - return fmt.Sprintf("[%d->?]", si.xkey) - case si.xkey == -1: - // [?->3] means "I don't know where Y[3] came from" - return fmt.Sprintf("[?->%d]", si.ykey) - default: - // [5->3] means "X[5] moved to Y[3]" - return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) - } -} -func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } -func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } -func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } -func (in indirect) String() string { return "*" } -func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } - -func (si sliceIndex) Key() int { - if si.xkey != si.ykey { - return -1 - } - return si.xkey -} -func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } -func (mi mapIndex) Key() reflect.Value { return mi.key } -func (sf structField) Name() string { return sf.name } -func (sf structField) Index() int { return sf.idx } -func (tf transform) Name() string { return tf.trans.name } -func (tf transform) Func() reflect.Value { return tf.trans.fnc } -func (tf transform) Option() Option { return tf.trans } - -func (pathStep) isPathStep() {} -func (sliceIndex) isSliceIndex() {} -func (mapIndex) isMapIndex() {} -func (typeAssertion) isTypeAssertion() {} -func (structField) isStructField() {} -func (indirect) isIndirect() {} -func (transform) isTransform() {} - -var ( - _ SliceIndex = sliceIndex{} - _ MapIndex = mapIndex{} - _ TypeAssertion = typeAssertion{} - _ StructField = structField{} - _ Indirect = indirect{} - _ Transform = transform{} - - _ PathStep = sliceIndex{} - _ PathStep = mapIndex{} - _ PathStep = typeAssertion{} - _ PathStep = structField{} - _ PathStep = indirect{} - _ PathStep = transform{} -) - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} - -// isValid reports whether the identifier is valid. -// Empty and underscore-only strings are not valid. -func isValid(id string) bool { - ok := id != "" && id != "_" - for j, c := range id { - ok = ok && (j > 0 || !unicode.IsDigit(c)) - ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) - } - return ok -} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go deleted file mode 100644 index 20e9f18e0dd5..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/reporter.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/value" -) - -type defaultReporter struct { - Option - diffs []string // List of differences, possibly truncated - ndiffs int // Total number of differences - nbytes int // Number of bytes in diffs - nlines int // Number of lines in diffs -} - -var _ reporter = (*defaultReporter)(nil) - -func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { - if eq { - return // Ignore equal results - } - const maxBytes = 4096 - const maxLines = 256 - r.ndiffs++ - if r.nbytes < maxBytes && r.nlines < maxLines { - sx := value.Format(x, value.FormatConfig{UseStringer: true}) - sy := value.Format(y, value.FormatConfig{UseStringer: true}) - if sx == sy { - // Unhelpful output, so use more exact formatting. 
- sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true}) - sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true}) - } - s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy) - r.diffs = append(r.diffs, s) - r.nbytes += len(s) - r.nlines += strings.Count(s, "\n") - } -} - -func (r *defaultReporter) String() string { - s := strings.Join(r.diffs, "") - if r.ndiffs == len(r.diffs) { - return s - } - return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go deleted file mode 100644 index d1518eb3a8c7..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build purego appengine js - -package cmp - -import "reflect" - -const supportAllowUnexported = false - -func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value { - panic("unsafeRetrieveField is not implemented") -} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go deleted file mode 100644 index 579b65507f6b..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !purego,!appengine,!js - -package cmp - -import ( - "reflect" - "unsafe" -) - -const supportAllowUnexported = true - -// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct -// such that the value has read-write permissions. -// -// The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() -} diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE deleted file mode 100644 index 7a4a3ea2424c..000000000000 --- a/vendor/github.com/google/go-containerregistry/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi b/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi deleted file mode 120000 index 5d7eddc7f33a..000000000000 --- a/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi +++ /dev/null @@ -1 +0,0 @@ -../kenobi \ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go deleted file mode 100644 index c9c08ec73773..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// anonymous implements Authenticator for anonymous authentication. -type anonymous struct{} - -// Authorization implements Authenticator. -func (a *anonymous) Authorization() (string, error) { - return "", nil -} - -// Anonymous is a singleton Authenticator for providing anonymous auth. -var Anonymous Authenticator = &anonymous{} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go deleted file mode 100644 index c39ee5a9f9a2..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "fmt" -) - -// auth implements Authenticator for an "auth" entry of the docker config. -type auth struct { - token string -} - -// Authorization implements Authenticator. -func (a *auth) Authorization() (string, error) { - return fmt.Sprintf("Basic %s", a.token), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go deleted file mode 100644 index 30e935cbc973..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// Authenticator is used to authenticate Docker transports. -type Authenticator interface { - // Authorization returns the value to use in an http transport's Authorization header. - Authorization() (string, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go deleted file mode 100644 index 7cd4984069b6..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "encoding/base64" - "fmt" -) - -// Basic implements Authenticator for basic authentication. -type Basic struct { - Username string - Password string -} - -// Authorization implements Authenticator. -func (b *Basic) Authorization() (string, error) { - delimited := fmt.Sprintf("%s:%s", b.Username, b.Password) - encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) - return fmt.Sprintf("Basic %s", encoded), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go deleted file mode 100644 index cb1ae5845ac7..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "fmt" -) - -// Bearer implements Authenticator for bearer authentication. -type Bearer struct { - Token string `json:"token"` -} - -// Authorization implements Authenticator. 
-func (b *Bearer) Authorization() (string, error) { - return fmt.Sprintf("Bearer %s", b.Token), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go deleted file mode 100644 index c2a5fc026755..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package authn defines different methods of authentication for -// talking to a container registry. -package authn diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go deleted file mode 100644 index c8ba4e6e24f8..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "strings" - - "github.com/google/go-containerregistry/pkg/name" -) - -// magicNotFoundMessage is the string that the CLI special cases to mean -// that a given registry domain wasn't found. -const ( - magicNotFoundMessage = "credentials not found in native keychain" -) - -// runner allows us to swap out how we "Run" os/exec commands. -type runner interface { - Run(*exec.Cmd) error -} - -// defaultRunner implements runner by just calling Run(). -type defaultRunner struct{} - -// Run implements runner. -func (dr *defaultRunner) Run(cmd *exec.Cmd) error { - return cmd.Run() -} - -// helper executes the named credential helper against the given domain. -type helper struct { - name string - domain name.Registry - - // We add this layer of indirection to facilitate unit testing. - r runner -} - -// helperOutput is the expected JSON output form of a credential helper -// (or at least these are the fields that we care about). -type helperOutput struct { - Username string - Secret string -} - -// Authorization implements Authenticator. 
-func (h *helper) Authorization() (string, error) { - helperName := fmt.Sprintf("docker-credential-%s", h.name) - // We want to execute: - // echo -n {domain} | docker-credential-{name} get - cmd := exec.Command(helperName, "get") - - // Some keychains expect a scheme: - // https://github.com/bazelbuild/rules_docker/issues/111 - cmd.Stdin = strings.NewReader(fmt.Sprintf("https://%v", h.domain)) - - var out bytes.Buffer - cmd.Stdout = &out - cmdErr := h.r.Run(cmd) - - // If we see this specific message, it means the domain wasn't found - // and we should fall back on anonymous auth. - output := strings.TrimSpace(out.String()) - if output == magicNotFoundMessage { - return Anonymous.Authorization() - } - - // Any other output should be parsed as JSON and the Username / Secret - // fields used for Basic authentication. - ho := helperOutput{} - if err := json.Unmarshal([]byte(output), &ho); err != nil { - if cmdErr != nil { - // If we failed to parse output, it won't contain Secret, so returning it - // in an error should be fine. - return "", fmt.Errorf("invoking %s: %v; output: %s", helperName, cmdErr, output) - } - return "", err - } - - if cmdErr != nil { - return "", fmt.Errorf("invoking %s: %v", helperName, cmdErr) - } - - b := Basic{Username: ho.Username, Password: ho.Secret} - return b.Authorization() -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go deleted file mode 100644 index aee1fedb9944..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "runtime" - - "github.com/google/go-containerregistry/pkg/name" -) - -// Keychain is an interface for resolving an image reference to a credential. -type Keychain interface { - // Resolve looks up the most appropriate credential for the specified registry. - Resolve(name.Registry) (Authenticator, error) -} - -// defaultKeychain implements Keychain with the semantics of the standard Docker -// credential keychain. -type defaultKeychain struct{} - -// configDir returns the directory containing Docker's config.json -func configDir() (string, error) { - if dc := os.Getenv("DOCKER_CONFIG"); dc != "" { - return dc, nil - } - if h := dockerUserHomeDir(); h != "" { - return filepath.Join(dockerUserHomeDir(), ".docker"), nil - } - return "", errNoHomeDir -} - -var errNoHomeDir = errors.New("could not determine home directory") - -// dockerUserHomeDir returns the current user's home directory, as interpreted by Docker. -func dockerUserHomeDir() string { - if runtime.GOOS == "windows" { - // Docker specifically expands "%USERPROFILE%" on Windows, - return os.Getenv("USERPROFILE") - } - // Docker defaults to "$HOME" Linux and OSX. 
- return os.Getenv("HOME") -} - -// authEntry is a helper for JSON parsing an "auth" entry of config.json -// This is not meant for direct consumption. -type authEntry struct { - Auth string `json:"auth"` - Username string `json:"username"` - Password string `json:"password"` -} - -// cfg is a helper for JSON parsing Docker's config.json -// This is not meant for direct consumption. -type cfg struct { - CredHelper map[string]string `json:"credHelpers,omitempty"` - CredStore string `json:"credsStore,omitempty"` - Auths map[string]authEntry `json:"auths,omitempty"` -} - -// There are a variety of ways a domain may get qualified within the Docker credential file. -// We enumerate them here as format strings. -var ( - domainForms = []string{ - // Allow naked domains - "%s", - // Allow scheme-prefixed. - "https://%s", - "http://%s", - // Allow scheme-prefixes with version in url path. - "https://%s/v1/", - "http://%s/v1/", - "https://%s/v2/", - "http://%s/v2/", - } - - // Export an instance of the default keychain. - DefaultKeychain Keychain = &defaultKeychain{} -) - -// Resolve implements Keychain. -func (dk *defaultKeychain) Resolve(reg name.Registry) (Authenticator, error) { - dir, err := configDir() - if err != nil { - log.Printf("Unable to determine config dir: %v", err) - return Anonymous, nil - } - file := filepath.Join(dir, "config.json") - content, err := ioutil.ReadFile(file) - if err != nil { - log.Printf("Unable to read %q: %v", file, err) - return Anonymous, nil - } - - var cf cfg - if err := json.Unmarshal(content, &cf); err != nil { - log.Printf("Unable to parse %q: %v", file, err) - return Anonymous, nil - } - - // Per-registry credential helpers take precedence. - if cf.CredHelper != nil { - for _, form := range domainForms { - if entry, ok := cf.CredHelper[fmt.Sprintf(form, reg.Name())]; ok { - return &helper{name: entry, domain: reg, r: &defaultRunner{}}, nil - } - } - } - - // A global credential helper is next in precedence. - if cf.CredStore != "" { - return &helper{name: cf.CredStore, domain: reg, r: &defaultRunner{}}, nil - } - - // Lastly, the 'auths' section directly contains basic auth entries. - if cf.Auths != nil { - for _, form := range domainForms { - if entry, ok := cf.Auths[fmt.Sprintf(form, reg.Name())]; ok { - if entry.Auth != "" { - return &auth{entry.Auth}, nil - } else if entry.Username != "" { - return &Basic{Username: entry.Username, Password: entry.Password}, nil - } else { - // TODO(mattmoor): Support identitytoken - // TODO(mattmoor): Support registrytoken - return nil, fmt.Errorf("Unsupported entry in \"auths\" section of %q", file) - } - } - } - } - - // Fallback on anonymous. - return Anonymous, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go deleted file mode 100644 index 9d7fb3145cf3..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "github.com/google/go-containerregistry/pkg/name" -) - -type multiKeychain struct { - keychains []Keychain -} - -// Assert that our multi-keychain implements Keychain. -var _ (Keychain) = (*multiKeychain)(nil) - -// NewMultiKeychain composes a list of keychains into one new keychain. -func NewMultiKeychain(kcs ...Keychain) Keychain { - return &multiKeychain{keychains: kcs} -} - -// Resolve implements Keychain. -func (mk *multiKeychain) Resolve(reg name.Registry) (Authenticator, error) { - for _, kc := range mk.keychains { - auth, err := kc.Resolve(reg) - if err != nil { - return nil, err - } - if auth != Anonymous { - return auth, nil - } - } - return Anonymous, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go deleted file mode 100644 index 01a25d554a95..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/check.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "unicode/utf8" -) - -// Strictness defines the level of strictness for name validation. -type Strictness int - -// Enums for CRUD operations. -const ( - StrictValidation Strictness = iota - WeakValidation -) - -// stripRunesFn returns a function which returns -1 (i.e. a value which -// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise. -func stripRunesFn(runes string) func(rune) rune { - return func(r rune) rune { - if strings.ContainsRune(runes, r) { - return -1 - } - return r - } -} - -// checkElement checks a given named element matches character and length restrictions. -// Returns true if the given element adheres to the given restrictions, false otherwise. -func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { - numRunes := utf8.RuneCountInString(element) - if (numRunes < minRunes) || (maxRunes < numRunes) { - return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element) - } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { - return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element) - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go deleted file mode 100644 index dc573ef1d8c7..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package name defines structured types for representing image references. -package name - -import ( - "strings" -) - -const ( - // These have the form: sha256: - // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation. - digestChars = "sh:0123456789abcdef" - digestDelim = "@" -) - -// Digest stores a digest name in a structured form. -type Digest struct { - Repository - digest string -} - -// Ensure Digest implements Reference -var _ Reference = (*Digest)(nil) - -// Context implements Reference. -func (d Digest) Context() Repository { - return d.Repository -} - -// Identifier implements Reference. -func (d Digest) Identifier() string { - return d.DigestStr() -} - -// DigestStr returns the digest component of the Digest. -func (d Digest) DigestStr() string { - return d.digest -} - -// Name returns the name from which the Digest was derived. -func (d Digest) Name() string { - return d.Repository.Name() + digestDelim + d.DigestStr() -} - -func (d Digest) String() string { - return d.Name() -} - -func checkDigest(name string) error { - return checkElement("digest", name, digestChars, 7+64, 7+64) -} - -// NewDigest returns a new Digest representing the given name, according to the given strictness. -func NewDigest(name string, strict Strictness) (Digest, error) { - // Split on "@" - parts := strings.Split(name, digestDelim) - if len(parts) != 2 { - return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) - } - base := parts[0] - digest := parts[1] - - // Always check that the digest is valid. - if err := checkDigest(digest); err != nil { - return Digest{}, err - } - - tag, err := NewTag(base, strict) - if err == nil { - base = tag.Repository.Name() - } - - repo, err := NewRepository(base, strict) - if err != nil { - return Digest{}, err - } - return Digest{repo, digest}, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go deleted file mode 100644 index 7847cc5d1ef8..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import "fmt" - -// ErrBadName is an error for when a bad docker name is supplied. 
-type ErrBadName struct { - info string -} - -func (e *ErrBadName) Error() string { - return e.info -} - -// NewErrBadName returns a ErrBadName which returns the given formatted string from Error(). -func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName { - return &ErrBadName{fmt.Sprintf(fmtStr, args...)} -} - -// IsErrBadName returns true if the given error is an ErrBadName. -func IsErrBadName(err error) bool { - _, ok := err.(*ErrBadName) - return ok -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go deleted file mode 100644 index 58775daa300c..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "errors" - "fmt" -) - -// Reference defines the interface that consumers use when they can -// take either a tag or a digest. -type Reference interface { - fmt.Stringer - - // Context accesses the Repository context of the reference. - Context() Repository - - // Identifier accesses the type-specific portion of the reference. - Identifier() string - - // Name is the fully-qualified reference name. - Name() string - - // Scope is the scope needed to access this reference. - Scope(string) string -} - -// ParseReference parses the string as a reference, either by tag or digest. -func ParseReference(s string, strict Strictness) (Reference, error) { - if t, err := NewTag(s, strict); err == nil { - return t, nil - } - if d, err := NewDigest(s, strict); err == nil { - return d, nil - } - // TODO: Combine above errors into something more useful? - return nil, errors.New("could not parse reference") -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go deleted file mode 100644 index ab74193080f5..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "net" - "net/url" - "regexp" - "strings" -) - -const ( - // DefaultRegistry is Docker Hub, assumed when a hostname is omitted. - DefaultRegistry = "index.docker.io" - defaultRegistryAlias = "docker.io" -) - -// Detect more complex forms of local references. 
-var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) - -// Detect the loopback IP (127.0.0.1) -var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) - -// Detect the loopback IPV6 (::1) -var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) - -// Registry stores a docker registry name in a structured form. -type Registry struct { - insecure bool - registry string -} - -// RegistryStr returns the registry component of the Registry. -func (r Registry) RegistryStr() string { - if r.registry != "" { - return r.registry - } - return DefaultRegistry -} - -// Name returns the name from which the Registry was derived. -func (r Registry) Name() string { - return r.RegistryStr() -} - -func (r Registry) String() string { - return r.Name() -} - -// Scope returns the scope required to access the registry. -func (r Registry) Scope(string) string { - // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z - return "registry:catalog:*" -} - -func (r Registry) isRFC1918() bool { - ipStr := strings.Split(r.Name(), ":")[0] - ip := net.ParseIP(ipStr) - if ip == nil { - return false - } - for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { - _, block, _ := net.ParseCIDR(cidr) - if block.Contains(ip) { - return true - } - } - return false -} - -// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. -func (r Registry) Scheme() string { - if r.insecure { - return "http" - } - if r.isRFC1918() { - return "http" - } - if strings.HasPrefix(r.Name(), "localhost:") { - return "http" - } - if reLocal.MatchString(r.Name()) { - return "http" - } - if reLoopback.MatchString(r.Name()) { - return "http" - } - if reipv6Loopback.MatchString(r.Name()) { - return "http" - } - return "https" -} - -func checkRegistry(name string) error { - // Per RFC 3986, registries (authorities) are required to be prefixed with "//" - // url.Host == hostname[:port] == authority - if url, err := url.Parse("//" + name); err != nil || url.Host != name { - return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) - } - return nil -} - -// NewRegistry returns a Registry based on the given name. -// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. -func NewRegistry(name string, strict Strictness) (Registry, error) { - if strict == StrictValidation && len(name) == 0 { - return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined") - } - - if err := checkRegistry(name); err != nil { - return Registry{}, err - } - - // Rewrite "docker.io" to "index.docker.io". - // See: https://github.com/google/go-containerregistry/issues/68 - if name == defaultRegistryAlias { - name = DefaultRegistry - } - - return Registry{registry: name}, nil -} - -// NewInsecureRegistry returns an Insecure Registry based on the given name. -// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. 
-func NewInsecureRegistry(name string, strict Strictness) (Registry, error) { - reg, err := NewRegistry(name, strict) - if err != nil { - return Registry{}, err - } - reg.insecure = true - return reg, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go deleted file mode 100644 index 43cc5b82b3fb..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "fmt" - "strings" -) - -const ( - defaultNamespace = "library" - repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" - regRepoDelimiter = "/" -) - -// Repository stores a docker repository name in a structured form. -type Repository struct { - Registry - repository string -} - -// See https://docs.docker.com/docker-hub/official_repos -func hasImplicitNamespace(repo string, reg Registry) bool { - return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry -} - -// RepositoryStr returns the repository component of the Repository. -func (r Repository) RepositoryStr() string { - if hasImplicitNamespace(r.repository, r.Registry) { - return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) - } - return r.repository -} - -// Name returns the name from which the Repository was derived. -func (r Repository) Name() string { - regName := r.Registry.Name() - if regName != "" { - return regName + regRepoDelimiter + r.RepositoryStr() - } - return r.RepositoryStr() -} - -func (r Repository) String() string { - return r.Name() -} - -// Scope returns the scope required to perform the given action on the registry. -// TODO(jonjohnsonjr): consider moving scopes to a separate package. -func (r Repository) Scope(action string) string { - return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) -} - -func checkRepository(repository string) error { - return checkElement("repository", repository, repositoryChars, 2, 255) -} - -// NewRepository returns a new Repository representing the given name, according to the given strictness. -func NewRepository(name string, strict Strictness) (Repository, error) { - if len(name) == 0 { - return Repository{}, NewErrBadName("a repository name must be specified") - } - - var registry string - repo := name - parts := strings.SplitN(name, regRepoDelimiter, 2) - if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { - // The first part of the repository is treated as the registry domain - // iff it contains a '.' or ':' character, otherwise it is all repository - // and the domain defaults to Docker Hub. 
- registry = parts[0] - repo = parts[1] - } - - if err := checkRepository(repo); err != nil { - return Repository{}, err - } - - reg, err := NewRegistry(registry, strict) - if err != nil { - return Repository{}, err - } - if hasImplicitNamespace(repo, reg) && strict == StrictValidation { - return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')") - } - return Repository{reg, repo}, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go deleted file mode 100644 index b8375e1f9bde..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" -) - -const ( - defaultTag = "latest" - // TODO(dekkagaijin): use the docker/distribution regexes for validation. - tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" - tagDelim = ":" -) - -// Tag stores a docker tag name in a structured form. -type Tag struct { - Repository - tag string -} - -// Ensure Tag implements Reference -var _ Reference = (*Tag)(nil) - -// Context implements Reference. -func (t Tag) Context() Repository { - return t.Repository -} - -// Identifier implements Reference. -func (t Tag) Identifier() string { - return t.TagStr() -} - -// TagStr returns the tag component of the Tag. -func (t Tag) TagStr() string { - if t.tag != "" { - return t.tag - } - return defaultTag -} - -// Name returns the name from which the Tag was derived. -func (t Tag) Name() string { - return t.Repository.Name() + tagDelim + t.TagStr() -} - -func (t Tag) String() string { - return t.Name() -} - -// Scope returns the scope required to perform the given action on the tag. -func (t Tag) Scope(action string) string { - return t.Repository.Scope(action) -} - -func checkTag(name string) error { - return checkElement("tag", name, tagChars, 1, 127) -} - -// NewTag returns a new Tag representing the given name, according to the given strictness. -func NewTag(name string, strict Strictness) (Tag, error) { - base := name - tag := "" - - // Split on ":" - parts := strings.Split(name, tagDelim) - // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { - base = strings.Join(parts[:len(parts)-1], tagDelim) - tag = parts[len(parts)-1] - } - - // We don't require a tag, but if we get one check it's valid, - // even when not being strict. - // If we are being strict, we want to validate the tag regardless in case - // it's empty. 
- if tag != "" || strict == StrictValidation { - if err := checkTag(tag); err != nil { - return Tag{}, err - } - } - - repo, err := NewRepository(base, strict) - if err != nil { - return Tag{}, err - } - return Tag{repo, tag}, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go deleted file mode 100644 index 3d8d6d30db47..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - "time" -) - -// ConfigFile is the configuration file that holds the metadata describing -// how to launch a container. See: -// https://github.com/opencontainers/image-spec/blob/master/config.md -type ConfigFile struct { - Architecture string `json:"architecture"` - Author string `json:"author,omitempty"` - Container string `json:"container,omitempty"` - Created Time `json:"created,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - History []History `json:"history,omitempty"` - OS string `json:"os"` - RootFS RootFS `json:"rootfs"` - Config Config `json:"config"` - ContainerConfig Config `json:"container_config,omitempty"` - OSVersion string `json:"osversion,omitempty"` -} - -// History is one entry of a list recording how this container image was built. -type History struct { - Author string `json:"author,omitempty"` - Created Time `json:"created,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Time is a wrapper around time.Time to help with deep copying -type Time struct { - time.Time -} - -// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time -// type is effectively immutable in the time API, so it is safe to -// copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t *Time) DeepCopyInto(out *Time) { - *out = *t -} - -// RootFS holds the ordered list of file system deltas that comprise the -// container image's root filesystem. -type RootFS struct { - Type string `json:"type"` - DiffIDs []Hash `json:"diff_ids"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. 
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config is a submessage of the config file described as: -// The execution parameters which SHOULD be used as a base when running -// a container using the image. -// The names of the fields in this message are chosen to reflect the JSON -// payload of the Config as defined here: -// https://git.io/vrAET -// and -// https://github.com/opencontainers/image-spec/blob/master/config.md -type Config struct { - AttachStderr bool - AttachStdin bool - AttachStdout bool - Cmd []string - Healthcheck *HealthConfig - Domainname string - Entrypoint []string - Env []string - Hostname string - Image string - Labels map[string]string - OnBuild []string - OpenStdin bool - StdinOnce bool - Tty bool - User string - Volumes map[string]struct{} - WorkingDir string - ExposedPorts map[string]struct{} - ArgsEscaped bool - NetworkDisabled bool - MacAddress string - StopSignal string - Shell []string -} - -// ParseConfigFile parses the io.Reader's contents into a ConfigFile. -func ParseConfigFile(r io.Reader) (*ConfigFile, error) { - cf := ConfigFile{} - if err := json.NewDecoder(r).Decode(&cf); err != nil { - return nil, err - } - return &cf, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go deleted file mode 100644 index 7273ec5ab875..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . -// +k8s:deepcopy-gen=package - -// Package v1 defines structured types for OCI v1 images -package v1 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go deleted file mode 100644 index 40933030d373..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - "strings" -) - -// Hash is an unqualified digest of some content, e.g. sha256:deadbeef -type Hash struct { - // Algorithm holds the algorithm used to compute the hash. - Algorithm string - - // Hex holds the hex portion of the content hash. - Hex string -} - -// String reverses NewHash returning the string-form of the hash. -func (h Hash) String() string { - return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) -} - -// NewHash validates the input string is a hash and returns a strongly type Hash object. -func NewHash(s string) (Hash, error) { - h := Hash{} - if err := h.parse(s); err != nil { - return Hash{}, err - } - return h, nil -} - -// MarshalJSON implements json.Marshaler -func (h Hash) MarshalJSON() ([]byte, error) { - return json.Marshal(h.String()) -} - -// UnmarshalJSON implements json.Unmarshaler -func (h *Hash) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { - return err - } - return h.parse(s) -} - -// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") -func Hasher(name string) (hash.Hash, error) { - switch name { - case "sha256": - return sha256.New(), nil - default: - return nil, fmt.Errorf("unsupported hash: %q", name) - } -} - -func (h *Hash) parse(unquoted string) error { - parts := strings.Split(unquoted, ":") - if len(parts) != 2 { - return fmt.Errorf("too many parts in hash: %s", unquoted) - } - - rest := strings.TrimLeft(parts[1], "0123456789abcdef") - if len(rest) != 0 { - return fmt.Errorf("found non-hex character in hash: %c", rest[0]) - } - - hasher, err := Hasher(parts[0]) - if err != nil { - return err - } - // Compare the hex to the expected size (2 hex characters per byte) - if len(parts[1]) != hasher.Size()*2 { - return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) - } - - h.Algorithm = parts[0] - h.Hex = parts[1] - return nil -} - -// SHA256 computes the Hash of the provided io.Reader's content. -func SHA256(r io.Reader) (Hash, int64, error) { - hasher := sha256.New() - n, err := io.Copy(hasher, r) - if err != nil { - return Hash{}, 0, err - } - return Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), - }, n, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go deleted file mode 100644 index 17b9839a6e7e..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Image defines the interface for interacting with an OCI v1 image. 
-type Image interface { - // Layers returns the ordered collection of filesystem layers that comprise this image. - // The order of the list is oldest/base layer first, and most-recent/top layer last. - Layers() ([]Layer, error) - - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) - - // ConfigName returns the hash of the image's config file. - ConfigName() (Hash, error) - - // ConfigFile returns this image's config file. - ConfigFile() (*ConfigFile, error) - - // RawConfigFile returns the serialized bytes of ConfigFile() - RawConfigFile() ([]byte, error) - - // Digest returns the sha256 of this image's manifest. - Digest() (Hash, error) - - // Manifest returns this image's Manifest object. - Manifest() (*Manifest, error) - - // RawManifest returns the serialized bytes of Manifest() - RawManifest() ([]byte, error) - - // LayerByDigest returns a Layer for interacting with a particular layer of - // the image, looking it up by "digest" (the compressed hash). - LayerByDigest(Hash) (Layer, error) - - // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" - // (the uncompressed hash). - LayerByDiffID(Hash) (Layer, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go deleted file mode 100644 index 604e6de3606b..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ImageIndex defines the interface for interacting with an OCI image index. -type ImageIndex interface { - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) - - // Digest returns the sha256 of this index's manifest. - Digest() (Hash, error) - - // IndexManifest returns this image index's manifest object. - IndexManifest() (*IndexManifest, error) - - // RawManifest returns the serialized bytes of IndexManifest(). - RawManifest() ([]byte, error) - - // Image returns a v1.Image that this ImageIndex references. - Image(Hash) (Image, error) - - // ImageIndex returns a v1.ImageIndex that this ImageIndex references. - ImageIndex(Hash) (ImageIndex, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go deleted file mode 100644 index 8b5091e45b28..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "io" -) - -// Layer is an interface for accessing the properties of a particular layer of a v1.Image -type Layer interface { - // Digest returns the Hash of the compressed layer. - Digest() (Hash, error) - - // DiffID returns the Hash of the uncompressed layer. - DiffID() (Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go deleted file mode 100644 index 36c341df8b57..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Manifest represents the OCI image manifest in a structured way. -type Manifest struct { - SchemaVersion int64 `json:"schemaVersion,omitempty"` - MediaType types.MediaType `json:"mediaType"` - Config Descriptor `json:"config"` - Layers []Descriptor `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// IndexManifest represents an OCI image index in a structured way. -type IndexManifest struct { - SchemaVersion int64 `json:"schemaVersion"` - MediaType types.MediaType `json:"mediaType,omitempty"` - Manifests []Descriptor `json:"manifests"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// Descriptor holds a reference from the manifest to one of its constituent elements. -type Descriptor struct { - MediaType types.MediaType `json:"mediaType"` - Size int64 `json:"size"` - Digest Hash `json:"digest"` - URLs []string `json:"urls,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` - Platform *Platform `json:"platform,omitempty"` -} - -// ParseManifest parses the io.Reader's contents into a Manifest. -func ParseManifest(r io.Reader) (*Manifest, error) { - m := Manifest{} - if err := json.NewDecoder(r).Decode(&m); err != nil { - return nil, err - } - return &m, nil -} - -// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. 
-func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { - im := IndexManifest{} - if err := json.NewDecoder(r).Decode(&im); err != nil { - return nil, err - } - return &im, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go deleted file mode 100644 index 497d1af0dfad..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -// CompressedLayer represents the bare minimum interface a natively -// compressed layer must implement for us to produce a v1.Layer -type CompressedLayer interface { - // Digest returns the Hash of the compressed layer. - Digest() (v1.Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) -} - -// compressedLayerExtender implements v1.Image using the compressed base properties. -type compressedLayerExtender struct { - CompressedLayer -} - -// Uncompressed implements v1.Layer -func (ule *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { - u, err := ule.Compressed() - if err != nil { - return nil, err - } - return v1util.GunzipReadCloser(u) -} - -// DiffID implements v1.Layer -func (ule *compressedLayerExtender) DiffID() (v1.Hash, error) { - // If our nested CompressedLayer implements DiffID, - // then delegate to it instead. - if wdi, ok := ule.CompressedLayer.(WithDiffID); ok { - return wdi.DiffID() - } - r, err := ule.Uncompressed() - if err != nil { - return v1.Hash{}, err - } - defer r.Close() - h, _, err := v1.SHA256(r) - return h, err -} - -// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer -func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { - return &compressedLayerExtender{ul}, nil -} - -// CompressedImageCore represents the base minimum interface a natively -// compressed image must implement for us to produce a v1.Image. -type CompressedImageCore interface { - imageCore - - // RawManifest returns the serialized bytes of the manifest. - RawManifest() ([]byte, error) - - // LayerByDigest is a variation on the v1.Image method, which returns - // a CompressedLayer instead. - LayerByDigest(v1.Hash) (CompressedLayer, error) -} - -// compressedImageExtender implements v1.Image by extending CompressedImageCore with the -// appropriate methods computed from the minimal core. 
-type compressedImageExtender struct { - CompressedImageCore -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*compressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *compressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// ConfigName implements v1.Image -func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// Layers implements v1.Image -func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { - hs, err := FSLayers(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(hs)) - for _, h := range hs { - l, err := i.LayerByDigest(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDigest implements v1.Image -func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - cl, err := i.CompressedImageCore.LayerByDigest(h) - if err != nil { - return nil, err - } - return CompressedToLayer(cl) -} - -// LayerByDiffID implements v1.Image -func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - h, err := DiffIDToBlob(i, h) - if err != nil { - return nil, err - } - return i.LayerByDigest(h) -} - -// ConfigFile implements v1.Image -func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Manifest implements v1.Image -func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { - return Manifest(i) -} - -// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image -func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { - return &compressedImageExtender{ - CompressedImageCore: cic, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go deleted file mode 100644 index 153dfe4d5384..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package partial defines methods for building up a v1.Image from -// minimal subsets that are sufficient for defining a v1.Image. -package partial diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go deleted file mode 100644 index 5d6da39d24ce..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// imageCore is the core set of properties without which we cannot build a v1.Image -type imageCore interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) - - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go deleted file mode 100644 index 9f75723ec5d0..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "bytes" - "io" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -// UncompressedLayer represents the bare minimum interface a natively -// uncompressed layer must implement for us to produce a v1.Layer -type UncompressedLayer interface { - // DiffID returns the Hash of the uncompressed layer. - DiffID() (v1.Hash, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) -} - -// uncompressedLayerExtender implements v1.Image using the uncompressed base properties. -type uncompressedLayerExtender struct { - UncompressedLayer - // Memoize size/hash so that the methods aren't twice as - // expensive as doing this manually. 
- hash v1.Hash - size int64 - hashSizeError error - once sync.Once -} - -// Compressed implements v1.Layer -func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { - u, err := ule.Uncompressed() - if err != nil { - return nil, err - } - return v1util.GzipReadCloser(u) -} - -// Digest implements v1.Layer -func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { - ule.calcSizeHash() - return ule.hash, ule.hashSizeError -} - -// Size implements v1.Layer -func (ule *uncompressedLayerExtender) Size() (int64, error) { - ule.calcSizeHash() - return ule.size, ule.hashSizeError -} - -func (ule *uncompressedLayerExtender) calcSizeHash() { - ule.once.Do(func() { - var r io.ReadCloser - r, ule.hashSizeError = ule.Compressed() - if ule.hashSizeError != nil { - return - } - defer r.Close() - ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) - }) -} - -// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer -func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { - return &uncompressedLayerExtender{UncompressedLayer: ul}, nil -} - -// UncompressedImageCore represents the bare minimum interface a natively -// uncompressed image must implement for us to produce a v1.Image -type UncompressedImageCore interface { - imageCore - - // LayerByDiffID is a variation on the v1.Image method, which returns - // an UncompressedLayer instead. - LayerByDiffID(v1.Hash) (UncompressedLayer, error) -} - -// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. -func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { - return &uncompressedImageExtender{ - UncompressedImageCore: uic, - }, nil -} - -// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the -// appropriate methods computed from the minimal core. 
-type uncompressedImageExtender struct { - UncompressedImageCore - - lock sync.Mutex - manifest *v1.Manifest -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*uncompressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// Manifest implements v1.Image -func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { - i.lock.Lock() - defer i.lock.Unlock() - if i.manifest != nil { - return i.manifest, nil - } - - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - - cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - m := &v1.Manifest{ - SchemaVersion: 2, - MediaType: types.DockerManifestSchema2, - Config: v1.Descriptor{ - MediaType: types.DockerConfigJSON, - Size: cfgSize, - Digest: cfgHash, - }, - } - - ls, err := i.Layers() - if err != nil { - return nil, err - } - - m.Layers = make([]v1.Descriptor, len(ls)) - for i, l := range ls { - sz, err := l.Size() - if err != nil { - return nil, err - } - h, err := l.Digest() - if err != nil { - return nil, err - } - - m.Layers[i] = v1.Descriptor{ - MediaType: types.DockerLayer, - Size: sz, - Digest: h, - } - } - - i.manifest = m - return i.manifest, nil -} - -// RawManifest implements v1.Image -func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { - return RawManifest(i) -} - -// ConfigName implements v1.Image -func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// ConfigFile implements v1.Image -func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Layers implements v1.Image -func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { - diffIDs, err := DiffIDs(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(diffIDs)) - for _, h := range diffIDs { - l, err := i.LayerByDiffID(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDiffID implements v1.Image -func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { - ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) - if err != nil { - return nil, err - } - return UncompressedToLayer(ul) -} - -// LayerByDigest implements v1.Image -func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - diffID, err := BlobToDiffID(i, h) - if err != nil { - return nil, err - } - return i.LayerByDiffID(diffID) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go deleted file mode 100644 index f724ec8ab38d..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
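For context on the vendored `partial` package being removed above: it completes a full `v1.Layer`/`v1.Image` from a minimal "core". Below is a rough sketch of wrapping an in-memory uncompressed blob, based only on the interfaces and import paths shown in the deleted files; `memLayer` and its contents are hypothetical, not part of this change.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
)

// memLayer is a hypothetical partial.UncompressedLayer backed by a byte slice.
type memLayer struct {
	content []byte
}

// DiffID returns the SHA-256 of the uncompressed contents.
func (m *memLayer) DiffID() (v1.Hash, error) {
	h, _, err := v1.SHA256(bytes.NewReader(m.content))
	return h, err
}

// Uncompressed streams the raw, uncompressed contents.
func (m *memLayer) Uncompressed() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewReader(m.content)), nil
}

func main() {
	// UncompressedToLayer fills in Digest/Compressed/Size by gzipping and
	// hashing the uncompressed stream on demand (memoized, per the code above).
	layer, err := partial.UncompressedToLayer(&memLayer{content: []byte("hello")})
	if err != nil {
		panic(err)
	}
	digest, _ := layer.Digest() // hash of the gzipped stream
	size, _ := layer.Size()     // compressed size
	fmt.Println(digest, size)
}
```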
- -package partial - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -// WithRawConfigFile defines the subset of v1.Image used by these helper methods -type WithRawConfigFile interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) -} - -// ConfigFile is a helper for implementing v1.Image -func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) { - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return v1.ParseConfigFile(bytes.NewReader(b)) -} - -// ConfigName is a helper for implementing v1.Image -func ConfigName(i WithRawConfigFile) (v1.Hash, error) { - b, err := i.RawConfigFile() - if err != nil { - return v1.Hash{}, err - } - h, _, err := v1.SHA256(bytes.NewReader(b)) - return h, err -} - -type configLayer struct { - hash v1.Hash - content []byte -} - -// Digest implements v1.Layer -func (cl *configLayer) Digest() (v1.Hash, error) { - return cl.hash, nil -} - -// DiffID implements v1.Layer -func (cl *configLayer) DiffID() (v1.Hash, error) { - return cl.hash, nil -} - -// Uncompressed implements v1.Layer -func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Compressed implements v1.Layer -func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Size implements v1.Layer -func (cl *configLayer) Size() (int64, error) { - return int64(len(cl.content)), nil -} - -var _ v1.Layer = (*configLayer)(nil) - -// ConfigLayer implements v1.Layer from the raw config bytes. -// This is so that clients (e.g. remote) can access the config as a blob. -func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { - h, err := ConfigName(i) - if err != nil { - return nil, err - } - rcfg, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return &configLayer{ - hash: h, - content: rcfg, - }, nil -} - -// WithConfigFile defines the subset of v1.Image used by these helper methods -type WithConfigFile interface { - // ConfigFile returns this image's config file. - ConfigFile() (*v1.ConfigFile, error) -} - -// DiffIDs is a helper for implementing v1.Image -func DiffIDs(i WithConfigFile) ([]v1.Hash, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return cfg.RootFS.DiffIDs, nil -} - -// RawConfigFile is a helper for implementing v1.Image -func RawConfigFile(i WithConfigFile) ([]byte, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return json.Marshal(cfg) -} - -// WithUncompressedLayer defines the subset of v1.Image used by these helper methods -type WithUncompressedLayer interface { - // UncompressedLayer is like UncompressedBlob, but takes the "diff id". - UncompressedLayer(v1.Hash) (io.ReadCloser, error) -} - -// Layer is the same as Blob, but takes the "diff id". -func Layer(wul WithUncompressedLayer, h v1.Hash) (io.ReadCloser, error) { - rc, err := wul.UncompressedLayer(h) - if err != nil { - return nil, err - } - return v1util.GzipReadCloser(rc) -} - -// WithRawManifest defines the subset of v1.Image used by these helper methods -type WithRawManifest interface { - // RawManifest returns the serialized bytes of this image's config file. 
- RawManifest() ([]byte, error) -} - -// Digest is a helper for implementing v1.Image -func Digest(i WithRawManifest) (v1.Hash, error) { - mb, err := i.RawManifest() - if err != nil { - return v1.Hash{}, err - } - digest, _, err := v1.SHA256(bytes.NewReader(mb)) - return digest, err -} - -// Manifest is a helper for implementing v1.Image -func Manifest(i WithRawManifest) (*v1.Manifest, error) { - b, err := i.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseManifest(bytes.NewReader(b)) -} - -// WithManifest defines the subset of v1.Image used by these helper methods -type WithManifest interface { - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// RawManifest is a helper for implementing v1.Image -func RawManifest(i WithManifest) ([]byte, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -// FSLayers is a helper for implementing v1.Image -func FSLayers(i WithManifest) ([]v1.Hash, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - fsl := make([]v1.Hash, len(m.Layers)) - for i, l := range m.Layers { - fsl[i] = l.Digest - } - return fsl, nil -} - -// BlobSize is a helper for implementing v1.Image -func BlobSize(i WithManifest, h v1.Hash) (int64, error) { - m, err := i.Manifest() - if err != nil { - return -1, err - } - for _, l := range m.Layers { - if l.Digest == h { - return l.Size, nil - } - } - return -1, fmt.Errorf("blob %v not found", h) -} - -// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods -type WithManifestAndConfigFile interface { - WithConfigFile - - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// BlobToDiffID is a helper for mapping between compressed -// and uncompressed blob hashes. -func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(i) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(i) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, blob := range blobs { - if blob == h { - return diffIDs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown blob %v", h) -} - -// DiffIDToBlob is a helper for mapping between uncompressed -// and compressed blob hashes. -func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(wm) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(wm) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, diffID := range diffIDs { - if diffID == h { - return blobs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) - -} - -// WithBlob defines the subset of v1.Image used by these helper methods -type WithBlob interface { - // Blob returns a ReadCloser for streaming the blob's content. - Blob(v1.Hash) (io.ReadCloser, error) -} - -// UncompressedBlob returns a ReadCloser for streaming the blob's content uncompressed. -func UncompressedBlob(b WithBlob, h v1.Hash) (io.ReadCloser, error) { - rc, err := b.Blob(h) - if err != nil { - return nil, err - } - return v1util.GunzipReadCloser(rc) -} - -// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
-type WithDiffID interface { - DiffID() (v1.Hash, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go deleted file mode 100644 index df9b2959e396..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Platform represents the target os/arch for an image. -type Platform struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go deleted file mode 100644 index 2032e276ea76..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// Delete removes the specified image reference from the remote registry. 
-func Delete(ref name.Reference, auth authn.Authenticator, t http.RoundTripper) error { - scopes := []string{ref.Scope(transport.DeleteScope)} - tr, err := transport.New(ref.Context().Registry, auth, t, scopes) - if err != nil { - return err - } - c := &http.Client{Transport: tr} - - u := url.URL{ - Scheme: ref.Context().Registry.Scheme(), - Host: ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), - } - - req, err := http.NewRequest(http.MethodDelete, u.String(), nil) - if err != nil { - return err - } - - resp, err := c.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK, http.StatusAccepted: - return nil - default: - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - return fmt.Errorf("unrecognized status code during DELETE: %v; %v", resp.Status, string(b)) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go deleted file mode 100644 index 846ba07cda1d..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package remote provides facilities for reading/writing v1.Images from/to -// a remote image registry. -package remote diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go deleted file mode 100644 index 1be0ad2ea412..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
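The `Delete` helper removed just above takes a reference, an authenticator, and a base transport. A minimal sketch of how a caller invokes it, assuming only the signature in the deleted delete.go; the registry and tag are illustrative.

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Illustrative reference; Delete issues a DELETE against
	// /v2/<repository>/manifests/<identifier> on the reference's registry.
	ref, err := name.ParseReference("registry.example.com/my/app:old", name.StrictValidation)
	if err != nil {
		log.Fatal(err)
	}
	if err := remote.Delete(ref, authn.Anonymous, http.DefaultTransport); err != nil {
		log.Fatal(err)
	}
}
```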
- -package remote - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -var defaultPlatform = v1.Platform{ - Architecture: "amd64", - OS: "linux", -} - -// remoteImage accesses an image from a remote registry -type remoteImage struct { - fetcher - manifestLock sync.Mutex // Protects manifest - manifest []byte - configLock sync.Mutex // Protects config - config []byte - mediaType types.MediaType - platform v1.Platform -} - -// ImageOption is a functional option for Image. -type ImageOption func(*imageOpener) error - -var _ partial.CompressedImageCore = (*remoteImage)(nil) - -type imageOpener struct { - auth authn.Authenticator - transport http.RoundTripper - ref name.Reference - client *http.Client - platform v1.Platform -} - -func (i *imageOpener) Open() (v1.Image, error) { - tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)}) - if err != nil { - return nil, err - } - ri := &remoteImage{ - fetcher: fetcher{ - Ref: i.ref, - Client: &http.Client{Transport: tr}, - }, - platform: i.platform, - } - imgCore, err := partial.CompressedToImage(ri) - if err != nil { - return imgCore, err - } - // Wrap the v1.Layers returned by this v1.Image in a hint for downstream - // remote.Write calls to facilitate cross-repo "mounting". - return &mountableImage{ - Image: imgCore, - Reference: i.ref, - }, nil -} - -// Image provides access to a remote image reference, applying functional options -// to the underlying imageOpener before resolving the reference into a v1.Image. -func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { - img := &imageOpener{ - auth: authn.Anonymous, - transport: http.DefaultTransport, - ref: ref, - platform: defaultPlatform, - } - - for _, option := range options { - if err := option(img); err != nil { - return nil, err - } - } - return img.Open() -} - -// fetcher implements methods for reading from a remote image. -type fetcher struct { - Ref name.Reference - Client *http.Client -} - -// url returns a url.Url for the specified path in the context of this remote image reference. 
-func (f *fetcher) url(resource, identifier string) url.URL { - return url.URL{ - Scheme: f.Ref.Context().Registry.Scheme(), - Host: f.Ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), - } -} - -func (f *fetcher) fetchManifest(acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { - u := f.url("manifests", f.Ref.Identifier()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.Client.Do(req) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, nil, err - } - - manifest, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - digest, size, err := v1.SHA256(bytes.NewReader(manifest)) - if err != nil { - return nil, nil, err - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := f.Ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) - } - } else { - // Do nothing for tags; I give up. - // - // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, - // but so many registries implement this incorrectly that it's not worth checking. - // - // For reference: - // https://github.com/docker/distribution/issues/2395 - // https://github.com/GoogleContainerTools/kaniko/issues/298 - } - - // Return all this info since we have to calculate it anyway. - desc := v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: types.MediaType(resp.Header.Get("Content-Type")), - } - - return manifest, &desc, nil -} - -func (r *remoteImage) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestSchema2, nil -} - -// TODO(jonjohnsonjr): Handle manifest lists. -func (r *remoteImage) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - acceptable := []types.MediaType{ - types.DockerManifestSchema2, - types.OCIManifestSchema1, - // We'll resolve these to an image based on the platform. - types.DockerManifestList, - types.OCIImageIndex, - } - manifest, desc, err := r.fetchManifest(acceptable) - if err != nil { - return nil, err - } - - // We want an image but the registry has an index, resolve it to an image. 
- for desc.MediaType == types.DockerManifestList || desc.MediaType == types.OCIImageIndex { - manifest, desc, err = r.matchImage(manifest) - if err != nil { - return nil, err - } - } - - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteImage) RawConfigFile() ([]byte, error) { - r.configLock.Lock() - defer r.configLock.Unlock() - if r.config != nil { - return r.config, nil - } - - m, err := partial.Manifest(r) - if err != nil { - return nil, err - } - - cl, err := r.LayerByDigest(m.Config.Digest) - if err != nil { - return nil, err - } - body, err := cl.Compressed() - if err != nil { - return nil, err - } - defer body.Close() - - r.config, err = ioutil.ReadAll(body) - if err != nil { - return nil, err - } - return r.config, nil -} - -// remoteLayer implements partial.CompressedLayer -type remoteLayer struct { - ri *remoteImage - digest v1.Hash -} - -// Digest implements partial.CompressedLayer -func (rl *remoteLayer) Digest() (v1.Hash, error) { - return rl.digest, nil -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { - u := rl.ri.url("blobs", rl.digest.String()) - resp, err := rl.ri.Client.Get(u.String()) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - return v1util.VerifyReadCloser(resp.Body, rl.digest) -} - -// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. -func (rl *remoteLayer) Manifest() (*v1.Manifest, error) { - return partial.Manifest(rl.ri) -} - -// Size implements partial.CompressedLayer -func (rl *remoteLayer) Size() (int64, error) { - // Look up the size of this digest in the manifest to avoid a request. - return partial.BlobSize(rl, rl.digest) -} - -// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. -func (rl *remoteLayer) ConfigFile() (*v1.ConfigFile, error) { - return partial.ConfigFile(rl.ri) -} - -// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have -// available in our ConfigFile. -func (rl *remoteLayer) DiffID() (v1.Hash, error) { - return partial.BlobToDiffID(rl, rl.digest) -} - -// LayerByDigest implements partial.CompressedLayer -func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - return &remoteLayer{ - ri: r, - digest: h, - }, nil -} - -// This naively matches the first manifest with matching Architecture and OS. -// -// We should probably use this instead: -// github.com/containerd/containerd/platforms -// -// But first we'd need to migrate to: -// github.com/opencontainers/image-spec/specs-go/v1 -func (r *remoteImage) matchImage(rawIndex []byte) ([]byte, *v1.Descriptor, error) { - index, err := v1.ParseIndexManifest(bytes.NewReader(rawIndex)) - if err != nil { - return nil, nil, err - } - for _, childDesc := range index.Manifests { - // If platform is missing from child descriptor, assume it's amd64/linux. 
- p := defaultPlatform - if childDesc.Platform != nil { - p = *childDesc.Platform - } - if r.platform.Architecture == p.Architecture && r.platform.OS == p.OS { - childRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), childDesc.Digest), name.StrictValidation) - if err != nil { - return nil, nil, err - } - r.fetcher = fetcher{ - Client: r.Client, - Ref: childRef, - } - return r.fetchManifest([]types.MediaType{childDesc.MediaType}) - } - } - return nil, nil, fmt.Errorf("no matching image for %s/%s, index: %s", r.platform.Architecture, r.platform.OS, string(rawIndex)) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go deleted file mode 100644 index 03afc481ad74..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "fmt" - "net/http" - "sync" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// remoteIndex accesses an index from a remote registry -type remoteIndex struct { - fetcher - manifestLock sync.Mutex // Protects manifest - manifest []byte - mediaType types.MediaType -} - -// Index provides access to a remote index reference, applying functional options -// to the underlying imageOpener before resolving the reference into a v1.ImageIndex. 
-func Index(ref name.Reference, options ...ImageOption) (v1.ImageIndex, error) { - i := &imageOpener{ - auth: authn.Anonymous, - transport: http.DefaultTransport, - ref: ref, - } - - for _, option := range options { - if err := option(i); err != nil { - return nil, err - } - } - tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)}) - if err != nil { - return nil, err - } - return &remoteIndex{ - fetcher: fetcher{ - Ref: i.ref, - Client: &http.Client{Transport: tr}, - }, - }, nil -} - -func (r *remoteIndex) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestList, nil -} - -func (r *remoteIndex) Digest() (v1.Hash, error) { - return partial.Digest(r) -} - -func (r *remoteIndex) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - acceptable := []types.MediaType{ - types.DockerManifestList, - types.OCIImageIndex, - } - manifest, desc, err := r.fetchManifest(acceptable) - if err != nil { - return nil, err - } - - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { - b, err := r.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseIndexManifest(bytes.NewReader(b)) -} - -func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { - imgRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) - if err != nil { - return nil, err - } - ri := &remoteImage{ - fetcher: fetcher{ - Ref: imgRef, - Client: r.Client, - }, - } - imgCore, err := partial.CompressedToImage(ri) - if err != nil { - return imgCore, err - } - // Wrap the v1.Layers returned by this v1.Image in a hint for downstream - // remote.Write calls to facilitate cross-repo "mounting". - return &mountableImage{ - Image: imgCore, - Reference: r.Ref, - }, nil -} - -func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - idxRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) - if err != nil { - return nil, err - } - return &remoteIndex{ - fetcher: fetcher{ - Ref: idxRef, - Client: r.Client, - }, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go deleted file mode 100644 index 1a36d0a4ba1f..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
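`Image` and `Index` above resolve a reference into a `v1.Image` or `v1.ImageIndex`, defaulting to anonymous auth and `http.DefaultTransport` unless options say otherwise. A hedged usage sketch built only from the deleted signatures; the reference string is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/example/app:1.0", name.StrictValidation)
	if err != nil {
		log.Fatal(err)
	}
	// remote.Image fetches the manifest (resolving manifest lists to a
	// platform-specific image) and returns a v1.Image whose layers are
	// fetched lazily over the registry API.
	img, err := remote.Image(ref)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	layers, err := img.Layers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s has %d layers\n", digest, len(layers))
}
```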
- -package remote - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -type tags struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// List calls /tags/list for the given repository. -func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) { - scopes := []string{repo.Scope(transport.PullScope)} - tr, err := transport.New(repo.Registry, auth, t, scopes) - if err != nil { - return nil, err - } - - uri := url.URL{ - Scheme: repo.Registry.Scheme(), - Host: repo.Registry.RegistryStr(), - Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), - } - - client := http.Client{Transport: tr} - resp, err := client.Get(uri.String()) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - parsed := tags{} - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - return parsed.Tags, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go deleted file mode 100644 index 3afda2a3417d..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// MountableLayer wraps a v1.Layer in a shim that enables the layer to be -// "mounted" when published to another registry. -type MountableLayer struct { - v1.Layer - - Reference name.Reference -} - -// mountableImage wraps the v1.Layer references returned by the embedded v1.Image -// in MountableLayer's so that remote.Write might attempt to mount them from their -// source repository. 
-type mountableImage struct { - v1.Image - - Reference name.Reference -} - -// Layers implements v1.Image -func (mi *mountableImage) Layers() ([]v1.Layer, error) { - ls, err := mi.Image.Layers() - if err != nil { - return nil, err - } - mls := make([]v1.Layer, 0, len(ls)) - for _, l := range ls { - mls = append(mls, &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }) - } - return mls, nil -} - -// LayerByDigest implements v1.Image -func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDigest(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} - -// LayerByDiffID implements v1.Image -func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDiffID(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go deleted file mode 100644 index 335e3fe5beb2..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "log" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// WithTransport is a functional option for overriding the default transport -// on a remote image -func WithTransport(t http.RoundTripper) ImageOption { - return func(i *imageOpener) error { - i.transport = t - return nil - } -} - -// WithAuth is a functional option for overriding the default authenticator -// on a remote image -func WithAuth(auth authn.Authenticator) ImageOption { - return func(i *imageOpener) error { - i.auth = auth - return nil - } -} - -// WithAuthFromKeychain is a functional option for overriding the default -// authenticator on a remote image using an authn.Keychain -func WithAuthFromKeychain(keys authn.Keychain) ImageOption { - return func(i *imageOpener) error { - auth, err := keys.Resolve(i.ref.Context().Registry) - if err != nil { - return err - } - if auth == authn.Anonymous { - log.Println("No matching credentials were found, falling back on anonymous") - } - i.auth = auth - return nil - } -} - -func WithPlatform(p v1.Platform) ImageOption { - return func(i *imageOpener) error { - i.platform = p - return nil - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go deleted file mode 100644 index e77f47f69905..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" -) - -type basicTransport struct { - inner http.RoundTripper - auth authn.Authenticator - target string -} - -var _ http.RoundTripper = (*basicTransport)(nil) - -// RoundTrip implements http.RoundTripper -func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { - hdr, err := bt.auth.Authorization() - if err != nil { - return nil, err - } - - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the host with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. - if in.Host == bt.target || in.URL.Host == bt.target { - in.Header.Set("Authorization", hdr) - } - in.Header.Set("User-Agent", transportName) - return bt.inner.RoundTrip(in) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go deleted file mode 100644 index f72ab276d6ec..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -type bearerTransport struct { - // Wrapped by bearerTransport. - inner http.RoundTripper - // Basic credentials that we exchange for bearer tokens. - basic authn.Authenticator - // Holds the bearer response from the token service. - bearer *authn.Bearer - // Registry to which we send bearer tokens. - registry name.Registry - // See https://tools.ietf.org/html/rfc6750#section-3 - realm string - // See https://docs.docker.com/registry/spec/auth/token/ - service string - scopes []string - // Scheme we should use, determined by ping response. 
- scheme string -} - -var _ http.RoundTripper = (*bearerTransport)(nil) - -// RoundTrip implements http.RoundTripper -func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { - sendRequest := func() (*http.Response, error) { - hdr, err := bt.bearer.Authorization() - if err != nil { - return nil, err - } - - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the registry with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. - if in.Host == bt.registry.RegistryStr() || in.URL.Host == bt.registry.RegistryStr() { - in.Header.Set("Authorization", hdr) - } - in.Header.Set("User-Agent", transportName) - - in.URL.Scheme = bt.scheme - return bt.inner.RoundTrip(in) - } - - res, err := sendRequest() - if err != nil { - return nil, err - } - - // Perform a token refresh() and retry the request in case the token has expired - if res.StatusCode == http.StatusUnauthorized { - if err = bt.refresh(); err != nil { - return nil, err - } - return sendRequest() - } - - return res, err -} - -func (bt *bearerTransport) refresh() error { - u, err := url.Parse(bt.realm) - if err != nil { - return err - } - b := &basicTransport{ - inner: bt.inner, - auth: bt.basic, - target: u.Host, - } - client := http.Client{Transport: b} - - u.RawQuery = url.Values{ - "scope": bt.scopes, - "service": []string{bt.service}, - }.Encode() - - resp, err := client.Get(u.String()) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - return err - } - - content, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - // Some registries don't have "token" in the response. See #54. - type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - } - - var response tokenResponse - if err := json.Unmarshal(content, &response); err != nil { - return err - } - - // Find a token to turn into a Bearer authenticator - var bearer authn.Bearer - if response.Token != "" { - bearer = authn.Bearer{Token: response.Token} - } else if response.AccessToken != "" { - bearer = authn.Bearer{Token: response.AccessToken} - } else { - return fmt.Errorf("no token in bearer response:\n%s", content) - } - - // Replace our old bearer authenticator (if we had one) with our newly refreshed authenticator. - bt.bearer = &bearer - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go deleted file mode 100644 index ff7025b5c09e..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
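The functional options deleted in options.go (WithTransport, WithAuth, WithAuthFromKeychain, WithPlatform) are the only knobs callers have for customizing these fetches. A rough sketch of combining them, assuming `authn.DefaultKeychain` exists in this vendored version (it is not shown in this diff); the reference and platform values are illustrative.

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/example/app:1.0", name.StrictValidation)
	if err != nil {
		log.Fatal(err)
	}
	// WithAuthFromKeychain resolves registry credentials (falling back to
	// anonymous), and WithPlatform selects the child manifest when the tag
	// points at a manifest list / OCI index.
	img, err := remote.Image(ref,
		remote.WithAuthFromKeychain(authn.DefaultKeychain), // assumed keychain, not shown in this diff
		remote.WithPlatform(v1.Platform{Architecture: "arm64", OS: "linux"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = img
}
```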
- -// Package transport provides facilities for setting up an authenticated -// http.RoundTripper given an Authenticator and base RoundTripper. See -// transport.New for more information. -package transport diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go deleted file mode 100644 index 44885effa51f..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -// Error implements error to support the following error specification: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors -type Error struct { - Errors []Diagnostic `json:"errors,omitempty"` -} - -// Check that Error implements error -var _ error = (*Error)(nil) - -// Error implements error -func (e *Error) Error() string { - switch len(e.Errors) { - case 0: - return "" - case 1: - return e.Errors[0].String() - default: - var errors []string - for _, d := range e.Errors { - errors = append(errors, d.String()) - } - return fmt.Sprintf("multiple errors returned: %s", - strings.Join(errors, ";")) - } -} - -// Diagnostic represents a single error returned by a Docker registry interaction. -type Diagnostic struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] -func (d Diagnostic) String() string { - msg := fmt.Sprintf("%s: %s", d.Code, d.Message) - if d.Detail != nil { - msg = fmt.Sprintf("%s; %v", msg, d.Detail) - } - return msg -} - -// ErrorCode is an enumeration of supported error codes. -type ErrorCode string - -// The set of error conditions a registry may return: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 -const ( - BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" - BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" - BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" - DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" - ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" - ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" - ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" - ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" - NameInvalidErrorCode ErrorCode = "NAME_INVALID" - NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" - SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" - TagInvalidErrorCode ErrorCode = "TAG_INVALID" - UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" - DeniedErrorCode ErrorCode = "DENIED" - UnsupportedErrorCode ErrorCode = "UNSUPPORTED" -) - -// CheckError returns a structured error if the response status is not in codes. 
-func CheckError(resp *http.Response, codes ...int) error { - for _, code := range codes { - if resp.StatusCode == code { - // This is one of the supported status codes. - return nil - } - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors - var structuredError Error - if err := json.Unmarshal(b, &structuredError); err != nil { - // If the response isn't an unstructured error, then return some - // reasonable error response containing the response body. - return fmt.Errorf("unsupported status code %d; body: %s", resp.StatusCode, string(b)) - } - return &structuredError -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go deleted file mode 100644 index cc0d2cfeaa91..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "strings" - - "github.com/google/go-containerregistry/pkg/name" -) - -type challenge string - -const ( - anonymous challenge = "anonymous" - basic challenge = "basic" - bearer challenge = "bearer" -) - -type pingResp struct { - challenge challenge - - // Following the challenge there are often key/value pairs - // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" - parameters map[string]string - - // The registry's scheme to use. Communicates whether we fell back to http. - scheme string -} - -func (c challenge) Canonical() challenge { - return challenge(strings.ToLower(string(c))) -} - -func parseChallenge(suffix string) map[string]string { - kv := make(map[string]string) - for _, token := range strings.Split(suffix, ",") { - // Trim any whitespace around each token. - token = strings.Trim(token, " ") - - // Break the token into a key/value pair - if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { - // Unquote the value, if it is quoted. - kv[parts[0]] = strings.Trim(parts[1], `"`) - } else { - // If there was only one part, treat is as a key with an empty value - kv[token] = "" - } - } - return kv -} - -func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) { - client := http.Client{Transport: t} - - // This first attempts to use "https" for every request, falling back to http - // if the registry matches our localhost heuristic or if it is intentionally - // set to insecure via name.NewInsecureRegistry. - schemes := []string{"https"} - if reg.Scheme() == "http" { - schemes = append(schemes, "http") - } - - var connErr error - for _, scheme := range schemes { - url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) - resp, err := client.Get(url) - if err != nil { - connErr = err - // Potentially retry with http. 
- continue - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. - return &pingResp{ - challenge: anonymous, - scheme: scheme, - }, nil - case http.StatusUnauthorized: - wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) - if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { - // If there are two parts, then parse the challenge parameters. - return &pingResp{ - challenge: challenge(parts[0]).Canonical(), - parameters: parseChallenge(parts[1]), - scheme: scheme, - }, nil - } - // Otherwise, just return the challenge without parameters. - return &pingResp{ - challenge: challenge(wac).Canonical(), - scheme: scheme, - }, nil - default: - return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) - } - } - return nil, connErr -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go deleted file mode 100644 index c3b56f7a41c1..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -// Scopes suitable to qualify each Repository -const ( - PullScope string = "pull" - PushScope string = "push,pull" - // For now DELETE is PUSH, which is the read/write ACL. - DeleteScope string = PushScope - CatalogScope string = "catalog" -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go deleted file mode 100644 index 18c8e66c7525..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -const ( - transportName = "go-containerregistry" -) - -// New returns a new RoundTripper based on the provided RoundTripper that has been -// setup to authenticate with the remote registry "reg", in the capacity -// laid out by the specified scopes. 
-func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { - // The handshake: - // 1. Use "t" to ping() the registry for the authentication challenge. - // - // 2a. If we get back a 200, then simply use "t". - // - // 2b. If we get back a 401 with a Basic challenge, then use a transport - // that just attachs auth each roundtrip. - // - // 2c. If we get back a 401 with a Bearer challenge, then use a transport - // that attaches a bearer token to each request, and refreshes is on 401s. - // Perform an initial refresh to seed the bearer token. - - // First we ping the registry to determine the parameters of the authentication handshake - // (if one is even necessary). - pr, err := ping(reg, t) - if err != nil { - return nil, err - } - - switch pr.challenge.Canonical() { - case anonymous: - return t, nil - case basic: - return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil - case bearer: - // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. - realm, ok := pr.parameters["realm"] - if !ok { - return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) - } - service, ok := pr.parameters["service"] - if !ok { - // If the service parameter is not specified, then default it to the registry - // with which we are talking. - service = reg.String() - } - bt := &bearerTransport{ - inner: t, - basic: auth, - realm: realm, - registry: reg, - service: service, - scopes: scopes, - scheme: pr.scheme, - } - if err := bt.refresh(); err != nil { - return nil, err - } - return bt, nil - default: - return nil, fmt.Errorf("Unrecognized challenge: %s", pr.challenge) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go deleted file mode 100644 index 66f148155d7e..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "errors" - "fmt" - "io" - "log" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" -) - -type manifest interface { - RawManifest() ([]byte, error) - MediaType() (types.MediaType, error) - Digest() (v1.Hash, error) -} - -// Write pushes the provided img to the specified image reference. 
-func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error { - ls, err := img.Layers() - if err != nil { - return err - } - - scopes := scopesForUploadingImage(ref, ls) - tr, err := transport.New(ref.Context().Registry, auth, t, scopes) - if err != nil { - return err - } - w := writer{ - ref: ref, - client: &http.Client{Transport: tr}, - } - - // Upload individual layers in goroutines and collect any errors. - // If we can dedupe by the layer digest, try to do so. If the layer is - // a stream.Layer, we can't dedupe and might re-upload. - var g errgroup.Group - uploaded := map[v1.Hash]bool{} - for _, l := range ls { - l := l - if _, ok := l.(*stream.Layer); !ok { - h, err := l.Digest() - if err != nil { - return err - } - // If we can determine the layer's digest ahead of - // time, use it to dedupe uploads. - if uploaded[h] { - continue // Already uploading. - } - uploaded[h] = true - } - - g.Go(func() error { - return w.uploadOne(l) - }) - } - - if l, err := partial.ConfigLayer(img); err == stream.ErrNotComputed { - // We can't read the ConfigLayer, because of streaming layers, since the - // config hasn't been calculated yet. - if err := g.Wait(); err != nil { - return err - } - - // Now that all the layers are uploaded, upload the config file blob. - l, err := partial.ConfigLayer(img) - if err != nil { - return err - } - if err := w.uploadOne(l); err != nil { - return err - } - } else if err != nil { - // This is an actual error, not a streaming error, just return it. - return err - } else { - // We *can* read the ConfigLayer, so upload it concurrently with the layers. - g.Go(func() error { - return w.uploadOne(l) - }) - - // Wait for the layers + config. - if err := g.Wait(); err != nil { - return err - } - } - - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. - return w.commitImage(img) -} - -// writer writes the elements of an image to a remote image reference. -type writer struct { - ref name.Reference - client *http.Client -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (w *writer) url(path string) url.URL { - return url.URL{ - Scheme: w.ref.Context().Registry.Scheme(), - Host: w.ref.Context().RegistryStr(), - Path: path, - } -} - -// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. -func (w *writer) nextLocation(resp *http.Response) (string, error) { - loc := resp.Header.Get("Location") - if len(loc) == 0 { - return "", errors.New("missing Location header") - } - u, err := url.Parse(loc) - if err != nil { - return "", err - } - - // If the location header returned is just a url path, then fully qualify it. - // We cannot simply call w.url, since there might be an embedded query string. - return resp.Request.URL.ResolveReference(u).String(), nil -} - -// checkExistingBlob checks if a blob exists already in the repository by making a -// HEAD request to the blob store API. GCR performs an existence check on the -// initiation if "mount" is specified, even if no "from" sources are specified. -// However, this is not broadly applicable to all registries, e.g. ECR. 
-func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String())) - - resp, err := w.client.Head(u.String()) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// checkExistingManifest checks if a manifest exists already in the repository -// by making a HEAD request to the manifest API. -func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), h.String())) - - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - req.Header.Set("Accept", string(mt)) - - resp, err := w.client.Do(req) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// initiateUpload initiates the blob upload, which starts with a POST that can -// optionally include the hash of the layer and a list of repositories from -// which that layer might be read. On failure, an error is returned. -// On success, the layer was either mounted (nothing more to do) or a blob -// upload was initiated and the body of that blob should be sent to the returned -// location. -func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) - uv := url.Values{} - if mount != "" && from != "" { - // Quay will fail if we specify a "mount" without a "from". - uv["mount"] = []string{mount} - uv["from"] = []string{from} - } - u.RawQuery = uv.Encode() - - // Make the request to initiate the blob upload. - resp, err := w.client.Post(u.String(), "application/json", nil) - if err != nil { - return "", false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { - return "", false, err - } - - // Check the response code to determine the result. - switch resp.StatusCode { - case http.StatusCreated: - // We're done, we were able to fast-path. - return "", true, nil - case http.StatusAccepted: - // Proceed to PATCH, upload has begun. - loc, err := w.nextLocation(resp) - return loc, false, err - default: - panic("Unreachable: initiateUpload") - } -} - -// streamBlob streams the contents of the blob to the specified location. -// On failure, this will return an error. On success, this will return the location -// header indicating how to commit the streamed blob. -func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) { - defer blob.Close() - - req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) - if err != nil { - return "", err - } - - resp, err := w.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { - return "", err - } - - // The blob has been uploaded, return the location header indicating - // how to commit this layer. 
- return w.nextLocation(resp) -} - -// commitBlob commits this blob by sending a PUT to the location returned from -// streaming the blob. -func (w *writer) commitBlob(location, digest string) error { - u, err := url.Parse(location) - if err != nil { - return err - } - v := u.Query() - v.Set("digest", digest) - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPut, u.String(), nil) - if err != nil { - return err - } - - resp, err := w.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusCreated) -} - -// uploadOne performs a complete upload of a single layer. -func (w *writer) uploadOne(l v1.Layer) error { - var from, mount, digest string - if _, ok := l.(*stream.Layer); !ok { - // Layer isn't streamable, we should take advantage of that to - // skip uploading if possible. - // By sending ?digest= in the request, we'll also check that - // our computed digest matches the one computed by the - // registry. - h, err := l.Digest() - if err != nil { - return err - } - digest = h.String() - - existing, err := w.checkExistingBlob(h) - if err != nil { - return err - } - if existing { - log.Printf("existing blob: %v", h) - return nil - } - - mount = h.String() - } - if ml, ok := l.(*MountableLayer); ok { - if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() { - from = ml.Reference.Context().RepositoryStr() - } - } - - location, mounted, err := w.initiateUpload(from, mount) - if err != nil { - return err - } else if mounted { - h, err := l.Digest() - if err != nil { - return err - } - log.Printf("mounted blob: %s", h.String()) - return nil - } - - blob, err := l.Compressed() - if err != nil { - return err - } - location, err = w.streamBlob(blob, location) - if err != nil { - return err - } - - h, err := l.Digest() - if err != nil { - return err - } - digest = h.String() - - if err := w.commitBlob(location, digest); err != nil { - return err - } - log.Printf("pushed blob: %s", digest) - return nil -} - -// commitImage does a PUT of the image's manifest. -func (w *writer) commitImage(man manifest) error { - raw, err := man.RawManifest() - if err != nil { - return err - } - mt, err := man.MediaType() - if err != nil { - return err - } - - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), w.ref.Identifier())) - - // Make the request to PUT the serialized manifest - req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) - if err != nil { - return err - } - req.Header.Set("Content-Type", string(mt)) - - resp, err := w.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { - return err - } - - digest, err := man.Digest() - if err != nil { - return err - } - - // The image was successfully pushed! 
- log.Printf("%v: digest: %v size: %d", w.ref, digest, len(raw)) - return nil -} - -func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string { - // use a map as set to remove duplicates scope strings - scopeSet := map[string]struct{}{} - - for _, l := range layers { - if ml, ok := l.(*MountableLayer); ok { - // we add push scope for ref.Context() after the loop - if ml.Reference.Context() != ref.Context() { - scopeSet[ml.Reference.Context().Scope(transport.PullScope)] = struct{}{} - } - } - } - - scopes := make([]string, 0) - // Push scope should be the first element because a few registries just look at the first scope to determine access. - scopes = append(scopes, ref.Scope(transport.PushScope)) - - for scope := range scopeSet { - scopes = append(scopes, scope) - } - - return scopes -} - -// WriteIndex pushes the provided ImageIndex to the specified image reference. -// WriteIndex will attempt to push all of the referenced manifests before -// attempting to push the ImageIndex, to retain referential integrity. -func WriteIndex(ref name.Reference, ii v1.ImageIndex, auth authn.Authenticator, t http.RoundTripper) error { - index, err := ii.IndexManifest() - if err != nil { - return err - } - - scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.New(ref.Context().Registry, auth, t, scopes) - if err != nil { - return err - } - w := writer{ - ref: ref, - client: &http.Client{Transport: tr}, - } - - for _, desc := range index.Manifests { - ref, err := name.ParseReference(fmt.Sprintf("%s@%s", ref.Context(), desc.Digest), name.StrictValidation) - if err != nil { - return err - } - exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) - if err != nil { - return err - } - if exists { - log.Printf("existing manifest: %v", desc.Digest) - continue - } - - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - ii, err := ii.ImageIndex(desc.Digest) - if err != nil { - return err - } - - if err := WriteIndex(ref, ii, auth, t); err != nil { - return err - } - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := ii.Image(desc.Digest) - if err != nil { - return err - } - if err := Write(ref, img, auth, t); err != nil { - return err - } - } - } - - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. - return w.commitImage(ii) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go deleted file mode 100644 index f8895a22622d..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stream - -import ( - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "errors" - "hash" - "io" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -var ( - // ErrNotComputed is returned when the requested value is not yet - // computed because the stream has not been consumed yet. - ErrNotComputed = errors.New("value not computed until stream is consumed") - - // ErrConsumed is returned by Compressed when the underlying stream has - // already been consumed and closed. - ErrConsumed = errors.New("stream was already consumed") -) - -// Layer is a streaming implementation of v1.Layer. -type Layer struct { - blob io.ReadCloser - consumed bool - - mu sync.Mutex - digest, diffID *v1.Hash - size int64 -} - -var _ v1.Layer = (*Layer)(nil) - -// NewLayer creates a Layer from an io.ReadCloser. -func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} } - -// Digest implements v1.Layer. -func (l *Layer) Digest() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.digest == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.digest, nil -} - -// DiffID implements v1.Layer. -func (l *Layer) DiffID() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.diffID == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.diffID, nil -} - -// Size implements v1.Layer. -func (l *Layer) Size() (int64, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.size == 0 { - return 0, ErrNotComputed - } - return l.size, nil -} - -// Uncompressed implements v1.Layer. -func (l *Layer) Uncompressed() (io.ReadCloser, error) { - return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") -} - -// Compressed implements v1.Layer. -func (l *Layer) Compressed() (io.ReadCloser, error) { - if l.consumed { - return nil, ErrConsumed - } - return newCompressedReader(l) -} - -type compressedReader struct { - closer io.Closer // original blob's Closer. - - h, zh hash.Hash // collects digests of compressed and uncompressed stream. - pr io.Reader - count *countWriter - - l *Layer // stream.Layer to update upon Close. -} - -func newCompressedReader(l *Layer) (*compressedReader, error) { - h := sha256.New() - zh := sha256.New() - count := &countWriter{} - - // gzip.Writer writes to the output stream via pipe, a hasher to - // capture compressed digest, and a countWriter to capture compressed - // size. - pr, pw := io.Pipe() - zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed) - if err != nil { - return nil, err - } - - cr := &compressedReader{ - closer: newMultiCloser(zw, l.blob), - pr: pr, - h: h, - zh: zh, - count: count, - l: l, - } - go func() { - if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { - pw.CloseWithError(err) - return - } - // Now close the compressed reader, to flush the gzip stream - // and calculate digest/diffID/size. This will cause pr to - // return EOF which will cause readers of the Compressed stream - // to finish reading. - pw.CloseWithError(cr.Close()) - }() - - return cr, nil -} - -func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } - -func (cr *compressedReader) Close() error { - cr.l.mu.Lock() - defer cr.l.mu.Unlock() - - // Close the inner ReadCloser. 
- if err := cr.closer.Close(); err != nil { - return err - } - - diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) - if err != nil { - return err - } - cr.l.diffID = &diffID - - digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) - if err != nil { - return err - } - cr.l.digest = &digest - - cr.l.size = cr.count.n - cr.l.consumed = true - return nil -} - -// countWriter counts bytes written to it. -type countWriter struct{ n int64 } - -func (c *countWriter) Write(p []byte) (int, error) { - c.n += int64(len(p)) - return len(p), nil -} - -// multiCloser is a Closer that collects multiple Closers and Closes them in order. -type multiCloser []io.Closer - -var _ io.Closer = (multiCloser)(nil) - -func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) } - -func (m multiCloser) Close() error { - for _, c := range m { - if err := c.Close(); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go deleted file mode 100644 index 4eb79bb4e5a8..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tarball provides facilities for reading/writing v1.Images from/to -// a tarball on-disk. -package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go deleted file mode 100644 index ced18735c85e..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tarball - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "sync" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -type image struct { - opener Opener - td *tarDescriptor - config []byte - imgDescriptor *singleImageTarDescriptor - - tag *name.Tag -} - -type uncompressedImage struct { - *image -} - -type compressedImage struct { - *image - manifestLock sync.Mutex // Protects manifest - manifest *v1.Manifest -} - -var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) -var _ partial.CompressedImageCore = (*compressedImage)(nil) - -// Opener is a thunk for opening a tar file. -type Opener func() (io.ReadCloser, error) - -func pathOpener(path string) Opener { - return func() (io.ReadCloser, error) { - return os.Open(path) - } -} - -// ImageFromPath returns a v1.Image from a tarball located on path. -func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { - return Image(pathOpener(path), tag) -} - -// Image exposes an image from the tarball at the provided path. -func Image(opener Opener, tag *name.Tag) (v1.Image, error) { - img := &image{ - opener: opener, - tag: tag, - } - if err := img.loadTarDescriptorAndConfig(); err != nil { - return nil, err - } - - // Peek at the first layer and see if it's compressed. - compressed, err := img.areLayersCompressed() - if err != nil { - return nil, err - } - if compressed { - c := compressedImage{ - image: img, - } - return partial.CompressedToImage(&c) - } - - uc := uncompressedImage{ - image: img, - } - return partial.UncompressedToImage(&uc) -} - -func (i *image) MediaType() (types.MediaType, error) { - return types.DockerManifestSchema2, nil -} - -// singleImageTarDescriptor is the struct used to represent a single image inside a `docker save` tarball. -type singleImageTarDescriptor struct { - Config string - RepoTags []string - Layers []string -} - -// tarDescriptor is the struct used inside the `manifest.json` file of a `docker save` tarball. -type tarDescriptor []singleImageTarDescriptor - -func (td tarDescriptor) findSpecifiedImageDescriptor(tag *name.Tag) (*singleImageTarDescriptor, error) { - if tag == nil { - if len(td) != 1 { - return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") - } - return &(td)[0], nil - } - for _, img := range td { - for _, tagStr := range img.RepoTags { - repoTag, err := name.NewTag(tagStr, name.WeakValidation) - if err != nil { - return nil, err - } - - // Compare the resolved names, since there are several ways to specify the same tag. 
- if repoTag.Name() == tag.Name() { - return &img, nil - } - } - } - return nil, fmt.Errorf("tag %s not found in tarball", tag) -} - -func (i *image) areLayersCompressed() (bool, error) { - if len(i.imgDescriptor.Layers) == 0 { - return false, errors.New("0 layers found in image") - } - layer := i.imgDescriptor.Layers[0] - blob, err := extractFileFromTar(i.opener, layer) - if err != nil { - return false, err - } - defer blob.Close() - return v1util.IsGzipped(blob) -} - -func (i *image) loadTarDescriptorAndConfig() error { - td, err := extractFileFromTar(i.opener, "manifest.json") - if err != nil { - return err - } - defer td.Close() - - if err := json.NewDecoder(td).Decode(&i.td); err != nil { - return err - } - - i.imgDescriptor, err = i.td.findSpecifiedImageDescriptor(i.tag) - if err != nil { - return err - } - - cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) - if err != nil { - return err - } - defer cfg.Close() - - i.config, err = ioutil.ReadAll(cfg) - if err != nil { - return err - } - return nil -} - -func (i *image) RawConfigFile() ([]byte, error) { - return i.config, nil -} - -// tarFile represents a single file inside a tar. Closing it closes the tar itself. -type tarFile struct { - io.Reader - io.Closer -} - -func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { - f, err := opener() - if err != nil { - return nil, err - } - tf := tar.NewReader(f) - for { - hdr, err := tf.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - if hdr.Name == filePath { - return tarFile{ - Reader: tf, - Closer: f, - }, nil - } - } - return nil, fmt.Errorf("file %s not found in tar", filePath) -} - -// uncompressedLayerFromTarball implements partial.UncompressedLayer -type uncompressedLayerFromTarball struct { - diffID v1.Hash - opener Opener - filePath string -} - -// DiffID implements partial.UncompressedLayer -func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { - return ulft.diffID, nil -} - -// Uncompressed implements partial.UncompressedLayer -func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { - return extractFileFromTar(ulft.opener, ulft.filePath) -} - -func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { - cfg, err := partial.ConfigFile(i) - if err != nil { - return nil, err - } - for idx, diffID := range cfg.RootFS.DiffIDs { - if diffID == h { - return &uncompressedLayerFromTarball{ - diffID: diffID, - opener: i.opener, - filePath: i.imgDescriptor.Layers[idx], - }, nil - } - } - return nil, fmt.Errorf("diff id %q not found", h) -} - -func (c *compressedImage) Manifest() (*v1.Manifest, error) { - c.manifestLock.Lock() - defer c.manifestLock.Unlock() - if c.manifest != nil { - return c.manifest, nil - } - - b, err := c.RawConfigFile() - if err != nil { - return nil, err - } - - cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - c.manifest = &v1.Manifest{ - SchemaVersion: 2, - MediaType: types.DockerManifestSchema2, - Config: v1.Descriptor{ - MediaType: types.DockerConfigJSON, - Size: cfgSize, - Digest: cfgHash, - }, - } - - for _, p := range c.imgDescriptor.Layers { - l, err := extractFileFromTar(c.opener, p) - if err != nil { - return nil, err - } - defer l.Close() - sha, size, err := v1.SHA256(l) - if err != nil { - return nil, err - } - c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ - MediaType: types.DockerLayer, - Size: size, - Digest: sha, - }) - } - return c.manifest, nil 
-} - -func (c *compressedImage) RawManifest() ([]byte, error) { - return partial.RawManifest(c) -} - -// compressedLayerFromTarball implements partial.CompressedLayer -type compressedLayerFromTarball struct { - digest v1.Hash - opener Opener - filePath string -} - -// Digest implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { - return clft.digest, nil -} - -// Compressed implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { - return extractFileFromTar(clft.opener, clft.filePath) -} - -// Size implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Size() (int64, error) { - r, err := clft.Compressed() - if err != nil { - return -1, err - } - defer r.Close() - _, i, err := v1.SHA256(r) - return i, err -} - -func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - m, err := c.Manifest() - if err != nil { - return nil, err - } - for i, l := range m.Layers { - if l.Digest == h { - fp := c.imgDescriptor.Layers[i] - return &compressedLayerFromTarball{ - digest: h, - opener: c.opener, - filePath: fp, - }, nil - } - } - return nil, fmt.Errorf("blob %v not found", h) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go deleted file mode 100644 index 00256e8f2eee..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tarball - -import ( - "compress/gzip" - "io" - "io/ioutil" - "os" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -type layer struct { - digest v1.Hash - diffID v1.Hash - size int64 - opener Opener - compressed bool -} - -func (l *layer) Digest() (v1.Hash, error) { - return l.digest, nil -} - -func (l *layer) DiffID() (v1.Hash, error) { - return l.diffID, nil -} - -func (l *layer) Compressed() (io.ReadCloser, error) { - rc, err := l.opener() - if err == nil && !l.compressed { - return v1util.GzipReadCloser(rc) - } - - return rc, err -} - -func (l *layer) Uncompressed() (io.ReadCloser, error) { - rc, err := l.opener() - if err == nil && l.compressed { - return v1util.GunzipReadCloser(rc) - } - - return rc, err -} - -func (l *layer) Size() (int64, error) { - return l.size, nil -} - -// LayerFromFile returns a v1.Layer given a tarball -func LayerFromFile(path string) (v1.Layer, error) { - opener := func() (io.ReadCloser, error) { - return os.Open(path) - } - return LayerFromOpener(opener) -} - -// LayerFromOpener returns a v1.Layer given an Opener function -func LayerFromOpener(opener Opener) (v1.Layer, error) { - rc, err := opener() - if err != nil { - return nil, err - } - defer rc.Close() - - compressed, err := v1util.IsGzipped(rc) - if err != nil { - return nil, err - } - - var digest v1.Hash - var size int64 - if digest, size, err = computeDigest(opener, compressed); err != nil { - return nil, err - } - - diffID, err := computeDiffID(opener, compressed) - if err != nil { - return nil, err - } - - return &layer{ - digest: digest, - diffID: diffID, - size: size, - compressed: compressed, - opener: opener, - }, nil -} - -func computeDigest(opener Opener, compressed bool) (v1.Hash, int64, error) { - rc, err := opener() - if err != nil { - return v1.Hash{}, 0, err - } - defer rc.Close() - - if compressed { - return v1.SHA256(rc) - } - - reader, err := v1util.GzipReadCloser(ioutil.NopCloser(rc)) - if err != nil { - return v1.Hash{}, 0, err - } - - return v1.SHA256(reader) -} - -func computeDiffID(opener Opener, compressed bool) (v1.Hash, error) { - rc, err := opener() - if err != nil { - return v1.Hash{}, err - } - defer rc.Close() - - if !compressed { - digest, _, err := v1.SHA256(rc) - return digest, err - } - - reader, err := gzip.NewReader(rc) - if err != nil { - return v1.Hash{}, err - } - - diffID, _, err := v1.SHA256(reader) - return diffID, err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go deleted file mode 100644 index 2ee81f0b8032..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tarball - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// WriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.Write with a new file. -func WriteToFile(p string, ref name.Reference, img v1.Image) error { - w, err := os.Create(p) - if err != nil { - return err - } - defer w.Close() - - return Write(ref, img, w) -} - -// MultiWriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. -func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image) error { - var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) - for i, d := range tagToImage { - refToImage[i] = d - } - return MultiRefWriteToFile(p, refToImage) -} - -// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. -func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image) error { - w, err := os.Create(p) - if err != nil { - return err - } - defer w.Close() - - return MultiRefWrite(refToImage, w) -} - -// Write is a wrapper to write a single image and tag to a tarball. -func Write(ref name.Reference, img v1.Image, w io.Writer) error { - return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w) -} - -// MultiWrite writes the contents of each image to the provided reader, in the compressed format. -// The contents are written in the following format: -// One manifest.json file at the top level containing information about several images. -// One file for each layer, named after the layer's SHA. -// One file for the config blob, named after its SHA. -func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error { - var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) - for i, d := range tagToImage { - refToImage[i] = d - } - return MultiRefWrite(refToImage, w) -} - -// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format. -// The contents are written in the following format: -// One manifest.json file at the top level containing information about several images. -// One file for each layer, named after the layer's SHA. -// One file for the config blob, named after its SHA. -func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error { - tf := tar.NewWriter(w) - defer tf.Close() - - imageToTags := dedupRefToImage(refToImage) - var td tarDescriptor - - for img, tags := range imageToTags { - // Write the config. - cfgName, err := img.ConfigName() - if err != nil { - return err - } - cfgBlob, err := img.RawConfigFile() - if err != nil { - return err - } - if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { - return err - } - - // Write the layers. - layers, err := img.Layers() - if err != nil { - return err - } - layerFiles := make([]string, len(layers)) - for i, l := range layers { - d, err := l.Digest() - if err != nil { - return err - } - - // Munge the file name to appease ancient technology. - // - // tar assumes anything with a colon is a remote tape drive: - // https://www.gnu.org/software/tar/manual/html_section/tar_45.html - // Drop the algorithm prefix, e.g. 
"sha256:" - hex := d.Hex - - // gunzip expects certain file extensions: - // https://www.gnu.org/software/gzip/manual/html_node/Overview.html - layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) - - r, err := l.Compressed() - if err != nil { - return err - } - blobSize, err := l.Size() - if err != nil { - return err - } - - if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { - return err - } - } - - // Generate the tar descriptor and write it. - sitd := singleImageTarDescriptor{ - Config: cfgName.String(), - RepoTags: tags, - Layers: layerFiles, - } - - td = append(td, sitd) - } - - tdBytes, err := json.Marshal(td) - if err != nil { - return err - } - return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes))) -} - -func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { - imageToTags := make(map[v1.Image][]string) - - for ref, img := range refToImage { - if tag, ok := ref.(name.Tag); ok { - if tags, ok := imageToTags[img]; ok && tags != nil { - imageToTags[img] = append(tags, tag.String()) - } else { - imageToTags[img] = []string{tag.String()} - } - } else { - if _, ok := imageToTags[img]; !ok { - imageToTags[img] = nil - } - } - } - - return imageToTags -} - -// write a file to the provided writer with a corresponding tar header -func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { - hdr := &tar.Header{ - Mode: 0644, - Typeflag: tar.TypeReg, - Size: size, - Name: path, - } - if err := tf.WriteHeader(hdr); err != nil { - return err - } - _, err := io.Copy(tf, r) - return err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go deleted file mode 100644 index ddaf71962ebc..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// MediaType is an enumeration of the supported mime types that an element of an image might have. -type MediaType string - -// The collection of known MediaType values. 
-const ( - OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" - OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" - OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" - OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" - OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" - OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" - OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" - DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" - DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" - DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" - DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go deleted file mode 100644 index 0925f13d541a..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "io" -) - -// readAndCloser implements io.ReadCloser by reading from a particular io.Reader -// and then calling the provided "Close()" method. -type readAndCloser struct { - io.Reader - CloseFunc func() error -} - -var _ io.ReadCloser = (*readAndCloser)(nil) - -// Close implements io.ReadCloser -func (rac *readAndCloser) Close() error { - return rac.CloseFunc() -} - -// writeAndCloser implements io.WriteCloser by reading from a particular io.Writer -// and then calling the provided "Close()" method. -type writeAndCloser struct { - io.Writer - CloseFunc func() error -} - -var _ io.WriteCloser = (*writeAndCloser)(nil) - -// Close implements io.WriteCloser -func (wac *writeAndCloser) Close() error { - return wac.CloseFunc() -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go deleted file mode 100644 index c9699770ce66..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "encoding/hex" - "fmt" - "hash" - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -type verifyReader struct { - inner io.Reader - hasher hash.Hash - expected v1.Hash -} - -// Read implements io.Reader -func (vc *verifyReader) Read(b []byte) (int, error) { - n, err := vc.inner.Read(b) - if err == io.EOF { - got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size()))) - if want := vc.expected.Hex; got != want { - return n, fmt.Errorf("error verifying %s checksum; got %q, want %q", - vc.expected.Algorithm, got, want) - } - } - return n, err -} - -// VerifyReadCloser wraps the given io.ReadCloser to verify that its contents match -// the provided v1.Hash before io.EOF is returned. -func VerifyReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) { - w, err := v1.Hasher(h.Algorithm) - if err != nil { - return nil, err - } - r2 := io.TeeReader(r, w) - return &readAndCloser{ - Reader: &verifyReader{ - inner: r2, - hasher: w, - expected: h, - }, - CloseFunc: r.Close, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go deleted file mode 100644 index 2b0f24f6a217..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "bytes" - "compress/gzip" - "io" -) - -var gzipMagicHeader = []byte{'\x1f', '\x8b'} - -// GzipReadCloser reads uncompressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which compressed data may be read. -// This uses gzip.BestSpeed for the compression level. -func GzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { - return GzipReadCloserLevel(r, gzip.BestSpeed) -} - -// GzipReadCloserLevel reads uncompressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which compressed data may be read. 
-// Refer to compress/gzip for the level: -// https://golang.org/pkg/compress/gzip/#pkg-constants -func GzipReadCloserLevel(r io.ReadCloser, level int) (io.ReadCloser, error) { - pr, pw := io.Pipe() - - go func() { - defer pw.Close() - defer r.Close() - - gw, _ := gzip.NewWriterLevel(pw, level) - defer gw.Close() - - _, err := io.Copy(gw, r) - if err != nil { - pr.CloseWithError(err) - } - }() - - return pr, nil -} - -// GunzipReadCloser reads compressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which uncompessed data may be read. -func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { - gr, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - return &readAndCloser{ - Reader: gr, - CloseFunc: func() error { - if err := gr.Close(); err != nil { - return err - } - return r.Close() - }, - }, nil -} - -// IsGzipped detects whether the input stream is compressed. -func IsGzipped(r io.Reader) (bool, error) { - magicHeader := make([]byte, 2) - n, err := r.Read(magicHeader) - if n == 0 && err == io.EOF { - return false, nil - } - if err != nil { - return false, err - } - return bytes.Equal(magicHeader, gzipMagicHeader), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go deleted file mode 100644 index 3440b5e13069..000000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go +++ /dev/null @@ -1,314 +0,0 @@ -// +build !ignore_autogenerated - -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Config) DeepCopyInto(out *Config) { - *out = *in - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Healthcheck != nil { - in, out := &in.Healthcheck, &out.Healthcheck - *out = new(HealthConfig) - (*in).DeepCopyInto(*out) - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Shell != nil { - in, out := &in.Shell, &out.Shell - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]History, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.RootFS.DeepCopyInto(&out.RootFS) - in.Config.DeepCopyInto(&out.Config) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. -func (in *ConfigFile) DeepCopy() *ConfigFile { - if in == nil { - return nil - } - out := new(ConfigFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Descriptor) DeepCopyInto(out *Descriptor) { - *out = *in - out.Digest = in.Digest - if in.URLs != nil { - in, out := &in.URLs, &out.URLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(Platform) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. -func (in *Descriptor) DeepCopy() *Descriptor { - if in == nil { - return nil - } - out := new(Descriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Hash) DeepCopyInto(out *Hash) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. 
-func (in *Hash) DeepCopy() *Hash { - if in == nil { - return nil - } - out := new(Hash) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { - *out = *in - if in.Test != nil { - in, out := &in.Test, &out.Test - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. -func (in *HealthConfig) DeepCopy() *HealthConfig { - if in == nil { - return nil - } - out := new(HealthConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *History) DeepCopyInto(out *History) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. -func (in *History) DeepCopy() *History { - if in == nil { - return nil - } - out := new(History) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { - *out = *in - if in.Manifests != nil { - in, out := &in.Manifests, &out.Manifests - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. -func (in *IndexManifest) DeepCopy() *IndexManifest { - if in == nil { - return nil - } - out := new(IndexManifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Manifest) DeepCopyInto(out *Manifest) { - *out = *in - in.Config.DeepCopyInto(&out.Config) - if in.Layers != nil { - in, out := &in.Layers, &out.Layers - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. -func (in *Manifest) DeepCopy() *Manifest { - if in == nil { - return nil - } - out := new(Manifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Platform) DeepCopyInto(out *Platform) { - *out = *in - if in.OSFeatures != nil { - in, out := &in.OSFeatures, &out.OSFeatures - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. -func (in *Platform) DeepCopy() *Platform { - if in == nil { - return nil - } - out := new(Platform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RootFS) DeepCopyInto(out *RootFS) { - *out = *in - if in.DiffIDs != nil { - in, out := &in.DiffIDs, &out.DiffIDs - *out = make([]Hash, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. -func (in *RootFS) DeepCopy() *RootFS { - if in == nil { - return nil - } - out := new(RootFS) - in.DeepCopyInto(out) - return out -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. -func (in *Time) DeepCopy() *Time { - if in == nil { - return nil - } - out := new(Time) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/google/go-github/AUTHORS b/vendor/github.com/google/go-github/AUTHORS deleted file mode 100644 index 8d04ae7ce0e4..000000000000 --- a/vendor/github.com/google/go-github/AUTHORS +++ /dev/null @@ -1,216 +0,0 @@ -# This is the official list of go-github authors for copyright purposes. -# -# This does not necessarily list everyone who has contributed code, since in -# some cases, their employer may be the copyright holder. To see the full list -# of contributors, see the revision history in source control or -# https://github.com/google/go-github/graphs/contributors. -# -# Authors who wish to be recognized in this file should add themselves (or -# their employer, as appropriate). - -178inaba -Abhinav Gupta -Ahmed Hagy -Ainsley Chong -Akeda Bagus -Akhil Mohan -Alec Thomas -Aleks Clark -Alex Bramley -Alexander Harkness -Allen Sun -Amey Sakhadeo -Andreas Garnæs -Andrew Ryabchun -Andy Grunwald -Andy Hume -Andy Lindeman -Anshuman Bhartiya -Antoine -Antoine Pelisse -Anubha Kushwaha -appilon -Aravind -Arıl Bozoluk -Austin Dizzy -Ben Batha -Beshr Kayali -Beyang Liu -Billy Lynch -Björn Häuser -Brad Harris -Brad Moylan -Bradley Falzon -Brandon Cook -Brian Egizi -Bryan Boreham -Cami Diez -Carlos Alexandro Becker -chandresh-pancholi -Charles Fenwick Elliott -Charlie Yan -Chris King -Chris Roche -Chris Schaefer -Christoph Sassenberg -Colin Misare -Craig Peterson -Cristian Maglie -Daehyeok Mun -Daniel Leavitt -Daniel Nilsson -Dave Du Cros -Dave Henderson -David Deng -David Jannotta -Davide Zipeto -Dennis Webb -Dhi Aurrahman -Diego Lapiduz -Dmitri Shuralyov -dmnlk -Don Petersen -Doug Turner -Drew Fradette -Eli Uriegas -Elliott Beach -Emerson Wood -eperm -erwinvaneyk -Fabrice -Filippo Valsorda -Florian Forster -Francesc Gil -Francis -Fredrik Jönsson -Garrett Squire -George Kontridze -Georgy Buranov -Gnahz -Google Inc. 
-Grachev Mikhail -griffin_stewie -Guillaume Jacquet -Guz Alexander -Guðmundur Bjarni Ólafsson -Hanno Hecker -Hari haran -haya14busa -Huy Tr -huydx -i2bskn -Isao Jonas -isqua -Jameel Haffejee -Jan Kosecki -Javier Campanini -Jeremy Morris -Jesse Newland -Jihoon Chung -Jimmi Dyson -Joan Saum -Joe Tsai -John Barton -John Engelman -JP Phillips -jpbelanger-mtl -Juan Basso -Julien Garcia Gonzalez -Julien Rostand -Justin Abrahms -Jusung Lee -jzhoucliqr -Katrina Owen -Kautilya Tripathi < tripathi.kautilya@gmail.com> -Keita Urashima -Kevin Burke -Konrad Malawski -Kookheon Kwon -Krzysztof Kowalczyk -Kshitij Saraogi -kyokomi -Lucas Alcantara -Luke Evers -Luke Kysow -Luke Roberts -Luke Young -Maksim Zhylinski -Martin-Louis Bright -Marwan Sulaiman -Mat Geist -Matt -Matt Brender -Matt Gaunt -Matt Landis -Maxime Bury -Michael Spiegel -Michael Tiller -Michał Glapa -Nathan VanBenschoten -Navaneeth Suresh -Neil O'Toole -Nick Miyake -Nick Spragg -Nikhita Raghunath -Noah Zoschke -ns-cweber -Oleg Kovalov -Ondřej Kupka -Palash Nigam -Panagiotis Moustafellos -Parham Alvani -Parker Moore -parkhyukjun89 -Pavel Shtanko -Pete Wagner -Petr Shevtsov -Pierre Carrier -Piotr Zurek -Quinn Slack -Rackspace US, Inc. -Radek Simko -Radliński Ignacy -Rajendra arora -RaviTeja Pothana -rc1140 -Red Hat, Inc. -Rob Figueiredo -Rohit Upadhyay -Ronak Jain -Ruben Vereecken -Ryan Lower -Sahil Dua -saisi -Sam Minnée -Sandeep Sukhani -Sander van Harmelen -Sanket Payghan -Sarasa Kisaragi -Sean Wang -Sebastian Mandrean -Sebastian Mæland Pedersen -Sevki -Shagun Khemka -shakeelrao -Shawn Catanzarite -Shawn Smith -sona-tar -SoundCloud, Ltd. -Stian Eikeland -Tasya Aditya Rukmana -Thomas Bruyelle -Timothée Peignier -Trey Tacon -ttacon -Varadarajan Aravamudhan -Victor Castell -Victor Vrantchan -Vlad Ungureanu -Will Maier -William Bailey -xibz -Yann Malet -Yannick Utard -Yicheng Qin -Yumikiyo Osanai -Zach Latta diff --git a/vendor/github.com/google/go-github/LICENSE b/vendor/github.com/google/go-github/LICENSE deleted file mode 100644 index 28b6486f0b8e..000000000000 --- a/vendor/github.com/google/go-github/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The go-github AUTHORS. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-github/github/activity.go b/vendor/github.com/google/go-github/github/activity.go deleted file mode 100644 index d6c992c7f507..000000000000 --- a/vendor/github.com/google/go-github/github/activity.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "context" - -// ActivityService handles communication with the activity related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/activity/ -type ActivityService service - -// FeedLink represents a link to a related resource. -type FeedLink struct { - HRef *string `json:"href,omitempty"` - Type *string `json:"type,omitempty"` -} - -// Feeds represents timeline resources in Atom format. -type Feeds struct { - TimelineURL *string `json:"timeline_url,omitempty"` - UserURL *string `json:"user_url,omitempty"` - CurrentUserPublicURL *string `json:"current_user_public_url,omitempty"` - CurrentUserURL *string `json:"current_user_url,omitempty"` - CurrentUserActorURL *string `json:"current_user_actor_url,omitempty"` - CurrentUserOrganizationURL *string `json:"current_user_organization_url,omitempty"` - CurrentUserOrganizationURLs []string `json:"current_user_organization_urls,omitempty"` - Links *struct { - Timeline *FeedLink `json:"timeline,omitempty"` - User *FeedLink `json:"user,omitempty"` - CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"` - CurrentUser *FeedLink `json:"current_user,omitempty"` - CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"` - CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"` - CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"` - } `json:"_links,omitempty"` -} - -// ListFeeds lists all the feeds available to the authenticated user. -// -// GitHub provides several timeline resources in Atom format: -// Timeline: The GitHub global public timeline -// User: The public timeline for any user, using URI template -// Current user public: The public timeline for the authenticated user -// Current user: The private timeline for the authenticated user -// Current user actor: The private timeline for activity created by the -// authenticated user -// Current user organizations: The private timeline for the organizations -// the authenticated user is a member of. -// -// Note: Private feeds are only returned when authenticating via Basic Auth -// since current feed URIs use the older, non revocable auth tokens. 
-func (s *ActivityService) ListFeeds(ctx context.Context) (*Feeds, *Response, error) { - req, err := s.client.NewRequest("GET", "feeds", nil) - if err != nil { - return nil, nil, err - } - - f := &Feeds{} - resp, err := s.client.Do(ctx, req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go deleted file mode 100644 index 1754b78e9da8..000000000000 --- a/vendor/github.com/google/go-github/github/activity_events.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListEvents drinks from the firehose of all public events across GitHub. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events -func (s *ActivityService) ListEvents(ctx context.Context, opt *ListOptions) ([]*Event, *Response, error) { - u, err := addOptions("events", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListRepositoryEvents lists events for a repository. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-repository-events -func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListIssueEventsForRepository lists issue events for a repository. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository -func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsForRepoNetwork lists public events for a network of repositories. 
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories -func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("networks/%v/%v/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsForOrganization lists public events for an organization. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-an-organization -func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org string, opt *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("orgs/%v/events", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-performed-by-a-user -func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/events/public", user) - } else { - u = fmt.Sprintf("users/%v/events", user) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsReceivedByUser lists the events received by a user. If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received -func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/received_events/public", user) - } else { - u = fmt.Sprintf("users/%v/received_events", user) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListUserEventsForOrganization provides the user’s organization dashboard. You -// must be authenticated as the user to view this. 
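// Illustrative sketch only, not part of the vendored file above: how a caller
// typically uses the ActivityService event listings defined in this file.
// The owner/repo names and page size are placeholder assumptions; an
// unauthenticated client is enough for public events.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated; public data only

	// List the five most recent public events on a repository.
	events, _, err := client.Activity.ListRepositoryEvents(
		ctx, "kubernetes", "minikube", &github.ListOptions{PerPage: 5})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Println(e.GetType(), e.GetCreatedAt())
	}
}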
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-for-an-organization -func (s *ActivityService) ListUserEventsForOrganization(ctx context.Context, org, user string, opt *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("users/%v/events/orgs/%v", user, org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/activity_notifications.go b/vendor/github.com/google/go-github/github/activity_notifications.go deleted file mode 100644 index 45c8b2aeced2..000000000000 --- a/vendor/github.com/google/go-github/github/activity_notifications.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// Notification identifies a GitHub notification for a user. -type Notification struct { - ID *string `json:"id,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Subject *NotificationSubject `json:"subject,omitempty"` - - // Reason identifies the event that triggered the notification. - // - // GitHub API docs: https://developer.github.com/v3/activity/notifications/#notification-reasons - Reason *string `json:"reason,omitempty"` - - Unread *bool `json:"unread,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - LastReadAt *time.Time `json:"last_read_at,omitempty"` - URL *string `json:"url,omitempty"` -} - -// NotificationSubject identifies the subject of a notification. -type NotificationSubject struct { - Title *string `json:"title,omitempty"` - URL *string `json:"url,omitempty"` - LatestCommentURL *string `json:"latest_comment_url,omitempty"` - Type *string `json:"type,omitempty"` -} - -// NotificationListOptions specifies the optional parameters to the -// ActivityService.ListNotifications method. -type NotificationListOptions struct { - All bool `url:"all,omitempty"` - Participating bool `url:"participating,omitempty"` - Since time.Time `url:"since,omitempty"` - Before time.Time `url:"before,omitempty"` - - ListOptions -} - -// ListNotifications lists all notifications for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications -func (s *ActivityService) ListNotifications(ctx context.Context, opt *NotificationListOptions) ([]*Notification, *Response, error) { - u := fmt.Sprintf("notifications") - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []*Notification - resp, err := s.client.Do(ctx, req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, nil -} - -// ListRepositoryNotifications lists all notifications in a given repository -// for the authenticated user. 
-// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository -func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []*Notification - resp, err := s.client.Do(ctx, req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, nil -} - -type markReadOptions struct { - LastReadAt time.Time `json:"last_read_at,omitempty"` -} - -// MarkNotificationsRead marks all notifications up to lastRead as read. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-as-read -func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead time.Time) (*Response, error) { - opts := &markReadOptions{ - LastReadAt: lastRead, - } - req, err := s.client.NewRequest("PUT", "notifications", opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// MarkRepositoryNotificationsRead marks all notifications up to lastRead in -// the specified repository as read. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository -func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead time.Time) (*Response, error) { - opts := &markReadOptions{ - LastReadAt: lastRead, - } - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetThread gets the specified notification thread. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread -func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notification, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - notification := new(Notification) - resp, err := s.client.Do(ctx, req, notification) - if err != nil { - return nil, resp, err - } - - return notification, resp, nil -} - -// MarkThreadRead marks the specified thread as read. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read -func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetThreadSubscription checks to see if the authenticated user is subscribed -// to a thread. 
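// Minimal sketch, not part of the vendored file above: calling the
// notification endpoints defined in this file. These require authentication;
// the oauth2 token source and the GITHUB_TOKEN environment variable are
// assumptions for the example, not something defined in this repository.
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// List unread notifications updated in the last 24 hours.
	opts := &github.NotificationListOptions{Since: time.Now().Add(-24 * time.Hour)}
	notifications, _, err := client.Activity.ListNotifications(ctx, opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range notifications {
		fmt.Println(n.GetReason(), n.GetSubject().GetTitle())
	}
}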
-// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription -func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// SetThreadSubscription sets the subscription for the specified thread for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription -func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// DeleteThreadSubscription deletes the subscription for the specified thread -// for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription -func (s *ActivityService) DeleteThreadSubscription(ctx context.Context, id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/activity_star.go b/vendor/github.com/google/go-github/github/activity_star.go deleted file mode 100644 index 5ae5c101651f..000000000000 --- a/vendor/github.com/google/go-github/github/activity_star.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" -) - -// StarredRepository is returned by ListStarred. -type StarredRepository struct { - StarredAt *Timestamp `json:"starred_at,omitempty"` - Repository *Repository `json:"repo,omitempty"` -} - -// Stargazer represents a user that has starred a repository. -type Stargazer struct { - StarredAt *Timestamp `json:"starred_at,omitempty"` - User *User `json:"user,omitempty"` -} - -// ListStargazers lists people who have starred the specified repo. 
-// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-stargazers -func (s *ActivityService) ListStargazers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeStarringPreview) - - var stargazers []*Stargazer - resp, err := s.client.Do(ctx, req, &stargazers) - if err != nil { - return nil, resp, err - } - - return stargazers, resp, nil -} - -// ActivityListStarredOptions specifies the optional parameters to the -// ActivityService.ListStarred method. -type ActivityListStarredOptions struct { - // How to sort the repository list. Possible values are: created, updated, - // pushed, full_name. Default is "full_name". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Possible values are: asc, desc. - // Default is "asc" when sort is "full_name", otherwise default is "desc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListStarred lists all the repos starred by a user. Passing the empty string -// will list the starred repositories for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-repositories-being-starred -func (s *ActivityService) ListStarred(ctx context.Context, user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/starred", user) - } else { - u = "user/starred" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when APIs fully launch - acceptHeaders := []string{mediaTypeStarringPreview, mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*StarredRepository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// IsStarred checks if a repository is starred by authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository -func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bool, *Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(ctx, req, nil) - starred, err := parseBoolResponse(err) - return starred, resp, err -} - -// Star a repository as the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository -func (s *ActivityService) Star(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// Unstar a repository as the authenticated user. 
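// Minimal sketch, not part of the vendored file above: typical use of the
// starring endpoints defined in this file. The username is a placeholder;
// an unauthenticated client suffices for the public listing shown here,
// while IsStarred/Star/Unstar would additionally need an authenticated client.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Most recently starred repositories for a given user.
	opts := &github.ActivityListStarredOptions{
		Sort:        "created",
		Direction:   "desc",
		ListOptions: github.ListOptions{PerPage: 3},
	}
	starred, _, err := client.Activity.ListStarred(ctx, "octocat", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range starred {
		fmt.Println(s.GetRepository().GetFullName(), s.GetStarredAt())
	}
}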
-// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository -func (s *ActivityService) Unstar(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/activity_watching.go b/vendor/github.com/google/go-github/github/activity_watching.go deleted file mode 100644 index c749ca86e7c2..000000000000 --- a/vendor/github.com/google/go-github/github/activity_watching.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Subscription identifies a repository or thread subscription. -type Subscription struct { - Subscribed *bool `json:"subscribed,omitempty"` - Ignored *bool `json:"ignored,omitempty"` - Reason *string `json:"reason,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - - // only populated for repository subscriptions - RepositoryURL *string `json:"repository_url,omitempty"` - - // only populated for thread subscriptions - ThreadURL *string `json:"thread_url,omitempty"` -} - -// ListWatchers lists watchers of a particular repo. -// -// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-watchers -func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var watchers []*User - resp, err := s.client.Do(ctx, req, &watchers) - if err != nil { - return nil, resp, err - } - - return watchers, resp, nil -} - -// ListWatched lists the repositories the specified user is watching. Passing -// the empty string will fetch watched repos for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched -func (s *ActivityService) ListWatched(ctx context.Context, user string, opt *ListOptions) ([]*Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/subscriptions", user) - } else { - u = "user/subscriptions" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var watched []*Repository - resp, err := s.client.Do(ctx, req, &watched) - if err != nil { - return nil, resp, err - } - - return watched, resp, nil -} - -// GetRepositorySubscription returns the subscription for the specified -// repository for the authenticated user. If the authenticated user is not -// watching the repository, a nil Subscription is returned. 
-// -// GitHub API docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription -func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, repo string) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - // if it's just a 404, don't return that as an error - _, err = parseBoolResponse(err) - return nil, resp, err - } - - return sub, resp, nil -} - -// SetRepositorySubscription sets the subscription for the specified repository -// for the authenticated user. -// -// To watch a repository, set subscription.Subscribed to true. -// To ignore notifications made within a repository, set subscription.Ignored to true. -// To stop watching a repository, use DeleteRepositorySubscription. -// -// GitHub API docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription -func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, repo string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// DeleteRepositorySubscription deletes the subscription for the specified -// repository for the authenticated user. -// -// This is used to stop watching a repository. To control whether or not to -// receive notifications from a repository, use SetRepositorySubscription. -// -// GitHub API docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription -func (s *ActivityService) DeleteRepositorySubscription(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/admin.go b/vendor/github.com/google/go-github/github/admin.go deleted file mode 100644 index 2d96733a1c7c..000000000000 --- a/vendor/github.com/google/go-github/github/admin.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AdminService handles communication with the admin related methods of the -// GitHub API. These API routes are normally only accessible for GitHub -// Enterprise installations. -// -// GitHub API docs: https://developer.github.com/v3/enterprise/ -type AdminService service - -// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group. 
-type TeamLDAPMapping struct { - ID *int64 `json:"id,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Slug *string `json:"slug,omitempty"` - Description *string `json:"description,omitempty"` - Privacy *string `json:"privacy,omitempty"` - Permission *string `json:"permission,omitempty"` - - MembersURL *string `json:"members_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` -} - -func (m TeamLDAPMapping) String() string { - return Stringify(m) -} - -// UserLDAPMapping represents the mapping between a GitHub user and an LDAP user. -type UserLDAPMapping struct { - ID *int64 `json:"id,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` - Login *string `json:"login,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` -} - -func (m UserLDAPMapping) String() string { - return Stringify(m) -} - -// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user. -// -// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user -func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) { - u := fmt.Sprintf("admin/ldap/users/%v/mapping", user) - req, err := s.client.NewRequest("PATCH", u, mapping) - if err != nil { - return nil, nil, err - } - - m := new(UserLDAPMapping) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group. -// -// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team -func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int64, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) { - u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team) - req, err := s.client.NewRequest("PATCH", u, mapping) - if err != nil { - return nil, nil, err - } - - m := new(TeamLDAPMapping) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/admin_stats.go b/vendor/github.com/google/go-github/github/admin_stats.go deleted file mode 100644 index b5645f8c1764..000000000000 --- a/vendor/github.com/google/go-github/github/admin_stats.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AdminStats represents a variety of stats of a Github Enterprise -// installation. 
-type AdminStats struct { - Issues *IssueStats `json:"issues,omitempty"` - Hooks *HookStats `json:"hooks,omitempty"` - Milestones *MilestoneStats `json:"milestones,omitempty"` - Orgs *OrgStats `json:"orgs,omitempty"` - Comments *CommentStats `json:"comments,omitempty"` - Pages *PageStats `json:"pages,omitempty"` - Users *UserStats `json:"users,omitempty"` - Gists *GistStats `json:"gists,omitempty"` - Pulls *PullStats `json:"pulls,omitempty"` - Repos *RepoStats `json:"repos,omitempty"` -} - -func (s AdminStats) String() string { - return Stringify(s) -} - -// IssueStats represents the number of total, open and closed issues. -type IssueStats struct { - TotalIssues *int `json:"total_issues,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` - ClosedIssues *int `json:"closed_issues,omitempty"` -} - -func (s IssueStats) String() string { - return Stringify(s) -} - -// HookStats represents the number of total, active and inactive hooks. -type HookStats struct { - TotalHooks *int `json:"total_hooks,omitempty"` - ActiveHooks *int `json:"active_hooks,omitempty"` - InactiveHooks *int `json:"inactive_hooks,omitempty"` -} - -func (s HookStats) String() string { - return Stringify(s) -} - -// MilestoneStats represents the number of total, open and close milestones. -type MilestoneStats struct { - TotalMilestones *int `json:"total_milestones,omitempty"` - OpenMilestones *int `json:"open_milestones,omitempty"` - ClosedMilestones *int `json:"closed_milestones,omitempty"` -} - -func (s MilestoneStats) String() string { - return Stringify(s) -} - -// OrgStats represents the number of total, disabled organizations and the team -// and team member count. -type OrgStats struct { - TotalOrgs *int `json:"total_orgs,omitempty"` - DisabledOrgs *int `json:"disabled_orgs,omitempty"` - TotalTeams *int `json:"total_teams,omitempty"` - TotalTeamMembers *int `json:"total_team_members,omitempty"` -} - -func (s OrgStats) String() string { - return Stringify(s) -} - -// CommentStats represents the number of total comments on commits, gists, issues -// and pull requests. -type CommentStats struct { - TotalCommitComments *int `json:"total_commit_comments,omitempty"` - TotalGistComments *int `json:"total_gist_comments,omitempty"` - TotalIssueComments *int `json:"total_issue_comments,omitempty"` - TotalPullRequestComments *int `json:"total_pull_request_comments,omitempty"` -} - -func (s CommentStats) String() string { - return Stringify(s) -} - -// PageStats represents the total number of github pages. -type PageStats struct { - TotalPages *int `json:"total_pages,omitempty"` -} - -func (s PageStats) String() string { - return Stringify(s) -} - -// UserStats represents the number of total, admin and suspended users. -type UserStats struct { - TotalUsers *int `json:"total_users,omitempty"` - AdminUsers *int `json:"admin_users,omitempty"` - SuspendedUsers *int `json:"suspended_users,omitempty"` -} - -func (s UserStats) String() string { - return Stringify(s) -} - -// GistStats represents the number of total, private and public gists. -type GistStats struct { - TotalGists *int `json:"total_gists,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` -} - -func (s GistStats) String() string { - return Stringify(s) -} - -// PullStats represents the number of total, merged, mergable and unmergeable -// pull-requests. 
-type PullStats struct { - TotalPulls *int `json:"total_pulls,omitempty"` - MergedPulls *int `json:"merged_pulls,omitempty"` - MergablePulls *int `json:"mergeable_pulls,omitempty"` - UnmergablePulls *int `json:"unmergeable_pulls,omitempty"` -} - -func (s PullStats) String() string { - return Stringify(s) -} - -// RepoStats represents the number of total, root, fork, organization repositories -// together with the total number of pushes and wikis. -type RepoStats struct { - TotalRepos *int `json:"total_repos,omitempty"` - RootRepos *int `json:"root_repos,omitempty"` - ForkRepos *int `json:"fork_repos,omitempty"` - OrgRepos *int `json:"org_repos,omitempty"` - TotalPushes *int `json:"total_pushes,omitempty"` - TotalWikis *int `json:"total_wikis,omitempty"` -} - -func (s RepoStats) String() string { - return Stringify(s) -} - -// GetAdminStats returns a variety of metrics about a Github Enterprise -// installation. -// -// Please note that this is only available to site administrators, -// otherwise it will error with a 404 not found (instead of 401 or 403). -// -// GitHub API docs: https://developer.github.com/v3/enterprise-admin/admin_stats/ -func (s *AdminService) GetAdminStats(ctx context.Context) (*AdminStats, *Response, error) { - u := fmt.Sprintf("enterprise/stats/all") - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - m := new(AdminStats) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/apps.go b/vendor/github.com/google/go-github/github/apps.go deleted file mode 100644 index ae3aabda31e5..000000000000 --- a/vendor/github.com/google/go-github/github/apps.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// AppsService provides access to the installation related functions -// in the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/apps/ -type AppsService service - -// App represents a GitHub App. -type App struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - ExternalURL *string `json:"external_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` -} - -// InstallationToken represents an installation token. -type InstallationToken struct { - Token *string `json:"token,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` -} - -// InstallationPermissions lists the permissions for metadata, contents, issues and single file for an installation. -type InstallationPermissions struct { - Metadata *string `json:"metadata,omitempty"` - Contents *string `json:"contents,omitempty"` - Issues *string `json:"issues,omitempty"` - SingleFile *string `json:"single_file,omitempty"` -} - -// Installation represents a GitHub Apps installation. 
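// Minimal sketch, not part of the vendored file above, of how the AdminService
// statistics endpoint shown above is typically called. The GitHub Enterprise
// base/upload URLs and the GHE_TOKEN variable are placeholder assumptions, and
// GetAdminStats only succeeds for a site administrator of an Enterprise install.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GHE_TOKEN")})
	httpClient := oauth2.NewClient(ctx, ts)

	// Point the client at a (hypothetical) GitHub Enterprise instance.
	client, err := github.NewEnterpriseClient(
		"https://ghe.example.com/api/v3/", "https://ghe.example.com/api/uploads/", httpClient)
	if err != nil {
		log.Fatal(err)
	}

	stats, _, err := client.Admin.GetAdminStats(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("repos:", stats.GetRepos().GetTotalRepos(), "users:", stats.GetUsers().GetTotalUsers())
}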
-type Installation struct { - ID *int64 `json:"id,omitempty"` - AppID *int64 `json:"app_id,omitempty"` - TargetID *int64 `json:"target_id,omitempty"` - Account *User `json:"account,omitempty"` - AccessTokensURL *string `json:"access_tokens_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - TargetType *string `json:"target_type,omitempty"` - SingleFileName *string `json:"single_file_name,omitempty"` - RepositorySelection *string `json:"repository_selection,omitempty"` - Events []string `json:"events,omitempty"` - Permissions *InstallationPermissions `json:"permissions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -func (i Installation) String() string { - return Stringify(i) -} - -// Get a single GitHub App. Passing the empty string will get -// the authenticated GitHub App. -// -// Note: appSlug is just the URL-friendly name of your GitHub App. -// You can find this on the settings page for your GitHub App -// (e.g., https://github.com/settings/apps/:app_slug). -// -// GitHub API docs: https://developer.github.com/v3/apps/#get-a-single-github-app -func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, error) { - var u string - if appSlug != "" { - u = fmt.Sprintf("apps/%v", appSlug) - } else { - u = "app" - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - app := new(App) - resp, err := s.client.Do(ctx, req, app) - if err != nil { - return nil, resp, err - } - - return app, resp, nil -} - -// ListInstallations lists the installations that the current GitHub App has. -// -// GitHub API docs: https://developer.github.com/v3/apps/#find-installations -func (s *AppsService) ListInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) { - u, err := addOptions("app/installations", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - var i []*Installation - resp, err := s.client.Do(ctx, req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetInstallation returns the specified installation. -// -// GitHub API docs: https://developer.github.com/v3/apps/#get-a-single-installation -func (s *AppsService) GetInstallation(ctx context.Context, id int64) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("app/installations/%v", id)) -} - -// ListUserInstallations lists installations that are accessible to the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/apps/#list-installations-for-user -func (s *AppsService) ListUserInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) { - u, err := addOptions("user/installations", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeIntegrationPreview) - - var i struct { - Installations []*Installation `json:"installations"` - } - resp, err := s.client.Do(ctx, req, &i) - if err != nil { - return nil, resp, err - } - - return i.Installations, resp, nil -} - -// CreateInstallationToken creates a new installation token. -// -// GitHub API docs: https://developer.github.com/v3/apps/#create-a-new-installation-token -func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64) (*InstallationToken, *Response, error) { - u := fmt.Sprintf("app/installations/%v/access_tokens", id) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - t := new(InstallationToken) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// FindOrganizationInstallation finds the organization's installation information. -// -// GitHub API docs: https://developer.github.com/v3/apps/#find-organization-installation -func (s *AppsService) FindOrganizationInstallation(ctx context.Context, org string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("orgs/%v/installation", org)) -} - -// FindRepositoryInstallation finds the repository's installation information. -// -// GitHub API docs: https://developer.github.com/v3/apps/#find-repository-installation -func (s *AppsService) FindRepositoryInstallation(ctx context.Context, owner, repo string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("repos/%v/%v/installation", owner, repo)) -} - -// FindRepositoryInstallationByID finds the repository's installation information. -// -// Note: FindRepositoryInstallationByID uses the undocumented GitHub API endpoint /repositories/:id/installation. -func (s *AppsService) FindRepositoryInstallationByID(ctx context.Context, id int64) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("repositories/%d/installation", id)) -} - -// FindUserInstallation finds the user's installation information. -// -// GitHub API docs: https://developer.github.com/v3/apps/#find-repository-installation -func (s *AppsService) FindUserInstallation(ctx context.Context, user string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("users/%v/installation", user)) -} - -func (s *AppsService) getInstallation(ctx context.Context, url string) (*Installation, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - i := new(Installation) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/apps_installation.go b/vendor/github.com/google/go-github/github/apps_installation.go deleted file mode 100644 index ccfecb8d8950..000000000000 --- a/vendor/github.com/google/go-github/github/apps_installation.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import ( - "context" - "fmt" -) - -// ListRepos lists the repositories that are accessible to the authenticated installation. -// -// GitHub API docs: https://developer.github.com/v3/apps/installations/#list-repositories -func (s *AppsService) ListRepos(ctx context.Context, opt *ListOptions) ([]*Repository, *Response, error) { - u, err := addOptions("installation/repositories", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - var r struct { - Repositories []*Repository `json:"repositories"` - } - resp, err := s.client.Do(ctx, req, &r) - if err != nil { - return nil, resp, err - } - - return r.Repositories, resp, nil -} - -// ListUserRepos lists repositories that are accessible -// to the authenticated user for an installation. -// -// GitHub API docs: https://developer.github.com/v3/apps/installations/#list-repositories-accessible-to-the-user-for-an-installation -func (s *AppsService) ListUserRepos(ctx context.Context, id int64, opt *ListOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("user/installations/%v/repositories", id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeIntegrationPreview) - - var r struct { - Repositories []*Repository `json:"repositories"` - } - resp, err := s.client.Do(ctx, req, &r) - if err != nil { - return nil, resp, err - } - - return r.Repositories, resp, nil -} - -// AddRepository adds a single repository to an installation. -// -// GitHub API docs: https://developer.github.com/v3/apps/installations/#add-repository-to-installation -func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int64) (*Repository, *Response, error) { - u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// RemoveRepository removes a single repository from an installation. -// -// GitHub docs: https://developer.github.com/v3/apps/installations/#remove-repository-from-installation -func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int64) (*Response, error) { - u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/apps_marketplace.go b/vendor/github.com/google/go-github/github/apps_marketplace.go deleted file mode 100644 index 6dd568a2dc0a..000000000000 --- a/vendor/github.com/google/go-github/github/apps_marketplace.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// MarketplaceService handles communication with the marketplace related -// methods of the GitHub API. 
-// -// GitHub API docs: https://developer.github.com/v3/apps/marketplace/ -type MarketplaceService struct { - client *Client - // Stubbed controls whether endpoints that return stubbed data are used - // instead of production endpoints. Stubbed data is fake data that's useful - // for testing your GitHub Apps. Stubbed data is hard-coded and will not - // change based on actual subscriptions. - // - // GitHub API docs: https://developer.github.com/v3/apps/marketplace/ - Stubbed bool -} - -// MarketplacePlan represents a GitHub Apps Marketplace Listing Plan. -type MarketplacePlan struct { - URL *string `json:"url,omitempty"` - AccountsURL *string `json:"accounts_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - MonthlyPriceInCents *int `json:"monthly_price_in_cents,omitempty"` - YearlyPriceInCents *int `json:"yearly_price_in_cents,omitempty"` - // The pricing model for this listing. Can be one of "flat-rate", "per-unit", or "free". - PriceModel *string `json:"price_model,omitempty"` - UnitName *string `json:"unit_name,omitempty"` - Bullets *[]string `json:"bullets,omitempty"` - // State can be one of the values "draft" or "published". - State *string `json:"state,omitempty"` - HasFreeTrial *bool `json:"has_free_trial,omitempty"` -} - -// MarketplacePurchase represents a GitHub Apps Marketplace Purchase. -type MarketplacePurchase struct { - // BillingCycle can be one of the values "yearly", "monthly" or nil. - BillingCycle *string `json:"billing_cycle,omitempty"` - NextBillingDate *Timestamp `json:"next_billing_date,omitempty"` - UnitCount *int `json:"unit_count,omitempty"` - Plan *MarketplacePlan `json:"plan,omitempty"` - Account *MarketplacePlanAccount `json:"account,omitempty"` - OnFreeTrial *bool `json:"on_free_trial,omitempty"` - FreeTrialEndsOn *Timestamp `json:"free_trial_ends_on,omitempty"` -} - -// MarketplacePendingChange represents a pending change to a GitHub Apps Marketplace Plan. -type MarketplacePendingChange struct { - EffectiveDate *Timestamp `json:"effective_date,omitempty"` - UnitCount *int `json:"unit_count,omitempty"` - ID *int64 `json:"id,omitempty"` - Plan *MarketplacePlan `json:"plan,omitempty"` -} - -// MarketplacePlanAccount represents a GitHub Account (user or organization) on a specific plan. -type MarketplacePlanAccount struct { - URL *string `json:"url,omitempty"` - Type *string `json:"type,omitempty"` - ID *int64 `json:"id,omitempty"` - Login *string `json:"login,omitempty"` - Email *string `json:"email,omitempty"` - OrganizationBillingEmail *string `json:"organization_billing_email,omitempty"` - MarketplacePurchase *MarketplacePurchase `json:"marketplace_purchase,omitempty"` - MarketplacePendingChange *MarketplacePendingChange `json:"marketplace_pending_change,omitempty"` -} - -// ListPlans lists all plans for your Marketplace listing. 
-// -// GitHub API docs: https://developer.github.com/v3/apps/marketplace/#list-all-plans-for-your-marketplace-listing -func (s *MarketplaceService) ListPlans(ctx context.Context, opt *ListOptions) ([]*MarketplacePlan, *Response, error) { - uri := s.marketplaceURI("plans") - u, err := addOptions(uri, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var plans []*MarketplacePlan - resp, err := s.client.Do(ctx, req, &plans) - if err != nil { - return nil, resp, err - } - - return plans, resp, nil -} - -// ListPlanAccountsForPlan lists all GitHub accounts (user or organization) on a specific plan. -// -// GitHub API docs: https://developer.github.com/v3/apps/marketplace/#list-all-github-accounts-user-or-organization-on-a-specific-plan -func (s *MarketplaceService) ListPlanAccountsForPlan(ctx context.Context, planID int64, opt *ListOptions) ([]*MarketplacePlanAccount, *Response, error) { - uri := s.marketplaceURI(fmt.Sprintf("plans/%v/accounts", planID)) - u, err := addOptions(uri, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var accounts []*MarketplacePlanAccount - resp, err := s.client.Do(ctx, req, &accounts) - if err != nil { - return nil, resp, err - } - - return accounts, resp, nil -} - -// ListPlanAccountsForAccount lists all GitHub accounts (user or organization) associated with an account. -// -// GitHub API docs: https://developer.github.com/v3/apps/marketplace/#check-if-a-github-account-is-associated-with-any-marketplace-listing -func (s *MarketplaceService) ListPlanAccountsForAccount(ctx context.Context, accountID int64, opt *ListOptions) ([]*MarketplacePlanAccount, *Response, error) { - uri := s.marketplaceURI(fmt.Sprintf("accounts/%v", accountID)) - u, err := addOptions(uri, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var accounts []*MarketplacePlanAccount - resp, err := s.client.Do(ctx, req, &accounts) - if err != nil { - return nil, resp, err - } - - return accounts, resp, nil -} - -// ListMarketplacePurchasesForUser lists all GitHub marketplace purchases made by a user. 
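During development, the Stubbed flag defined above can be switched on so the same listing code runs against GitHub's canned data. A rough sketch; the client.Marketplace field and the GetID/GetName accessors come from the rest of the package rather than this hunk.

    import (
        "context"
        "fmt"

        "github.com/google/go-github/github"
    )

    func dumpPlans(ctx context.Context, client *github.Client) error {
        client.Marketplace.Stubbed = true // use the /stubbed endpoints while testing

        plans, _, err := client.Marketplace.ListPlans(ctx, &github.ListOptions{PerPage: 10})
        if err != nil {
            return err
        }
        for _, p := range plans {
            accounts, _, err := client.Marketplace.ListPlanAccountsForPlan(ctx, p.GetID(), nil)
            if err != nil {
                return err
            }
            fmt.Printf("plan %q: %d accounts\n", p.GetName(), len(accounts))
        }
        return nil
    }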
-// -// GitHub API docs: https://developer.github.com/v3/apps/marketplace/#get-a-users-marketplace-purchases -func (s *MarketplaceService) ListMarketplacePurchasesForUser(ctx context.Context, opt *ListOptions) ([]*MarketplacePurchase, *Response, error) { - uri := "user/marketplace_purchases" - if s.Stubbed { - uri = "user/marketplace_purchases/stubbed" - } - - u, err := addOptions(uri, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var purchases []*MarketplacePurchase - resp, err := s.client.Do(ctx, req, &purchases) - if err != nil { - return nil, resp, err - } - return purchases, resp, nil -} - -func (s *MarketplaceService) marketplaceURI(endpoint string) string { - url := "marketplace_listing" - if s.Stubbed { - url = "marketplace_listing/stubbed" - } - return url + "/" + endpoint -} diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go deleted file mode 100644 index 190205b02ac3..000000000000 --- a/vendor/github.com/google/go-github/github/authorizations.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2015 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Scope models a GitHub authorization scope. -// -// GitHub API docs: https://developer.github.com/v3/oauth/#scopes -type Scope string - -// This is the set of scopes for GitHub API V3 -const ( - ScopeNone Scope = "(no scope)" // REVISIT: is this actually returned, or just a documentation artifact? - ScopeUser Scope = "user" - ScopeUserEmail Scope = "user:email" - ScopeUserFollow Scope = "user:follow" - ScopePublicRepo Scope = "public_repo" - ScopeRepo Scope = "repo" - ScopeRepoDeployment Scope = "repo_deployment" - ScopeRepoStatus Scope = "repo:status" - ScopeDeleteRepo Scope = "delete_repo" - ScopeNotifications Scope = "notifications" - ScopeGist Scope = "gist" - ScopeReadRepoHook Scope = "read:repo_hook" - ScopeWriteRepoHook Scope = "write:repo_hook" - ScopeAdminRepoHook Scope = "admin:repo_hook" - ScopeAdminOrgHook Scope = "admin:org_hook" - ScopeReadOrg Scope = "read:org" - ScopeWriteOrg Scope = "write:org" - ScopeAdminOrg Scope = "admin:org" - ScopeReadPublicKey Scope = "read:public_key" - ScopeWritePublicKey Scope = "write:public_key" - ScopeAdminPublicKey Scope = "admin:public_key" - ScopeReadGPGKey Scope = "read:gpg_key" - ScopeWriteGPGKey Scope = "write:gpg_key" - ScopeAdminGPGKey Scope = "admin:gpg_key" -) - -// AuthorizationsService handles communication with the authorization related -// methods of the GitHub API. -// -// This service requires HTTP Basic Authentication; it cannot be accessed using -// an OAuth token. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/ -type AuthorizationsService service - -// Authorization represents an individual GitHub authorization. 
-type Authorization struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Scopes []Scope `json:"scopes,omitempty"` - Token *string `json:"token,omitempty"` - TokenLastEight *string `json:"token_last_eight,omitempty"` - HashedToken *string `json:"hashed_token,omitempty"` - App *AuthorizationApp `json:"app,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` - - // User is only populated by the Check and Reset methods. - User *User `json:"user,omitempty"` -} - -func (a Authorization) String() string { - return Stringify(a) -} - -// AuthorizationApp represents an individual GitHub app (in the context of authorization). -type AuthorizationApp struct { - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - ClientID *string `json:"client_id,omitempty"` -} - -func (a AuthorizationApp) String() string { - return Stringify(a) -} - -// Grant represents an OAuth application that has been granted access to an account. -type Grant struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - App *AuthorizationApp `json:"app,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Scopes []string `json:"scopes,omitempty"` -} - -func (g Grant) String() string { - return Stringify(g) -} - -// AuthorizationRequest represents a request to create an authorization. -type AuthorizationRequest struct { - Scopes []Scope `json:"scopes,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - ClientID *string `json:"client_id,omitempty"` - ClientSecret *string `json:"client_secret,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` -} - -func (a AuthorizationRequest) String() string { - return Stringify(a) -} - -// AuthorizationUpdateRequest represents a request to update an authorization. -// -// Note that for any one update, you must only provide one of the "scopes" -// fields. That is, you may provide only one of "Scopes", or "AddScopes", or -// "RemoveScopes". -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization -type AuthorizationUpdateRequest struct { - Scopes []string `json:"scopes,omitempty"` - AddScopes []string `json:"add_scopes,omitempty"` - RemoveScopes []string `json:"remove_scopes,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` -} - -func (a AuthorizationUpdateRequest) String() string { - return Stringify(a) -} - -// List the authorizations for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-authorizations -func (s *AuthorizationsService) List(ctx context.Context, opt *ListOptions) ([]*Authorization, *Response, error) { - u := "authorizations" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var auths []*Authorization - resp, err := s.client.Do(ctx, req, &auths) - if err != nil { - return nil, resp, err - } - return auths, resp, nil -} - -// Get a single authorization. 
-// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-authorization -func (s *AuthorizationsService) Get(ctx context.Context, id int64) (*Authorization, *Response, error) { - u := fmt.Sprintf("authorizations/%d", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - return a, resp, nil -} - -// Create a new authorization for the specified OAuth application. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization -func (s *AuthorizationsService) Create(ctx context.Context, auth *AuthorizationRequest) (*Authorization, *Response, error) { - u := "authorizations" - - req, err := s.client.NewRequest("POST", u, auth) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - return a, resp, nil -} - -// GetOrCreateForApp creates a new authorization for the specified OAuth -// application, only if an authorization for that application doesn’t already -// exist for the user. -// -// If a new token is created, the HTTP status code will be "201 Created", and -// the returned Authorization.Token field will be populated. If an existing -// token is returned, the status code will be "200 OK" and the -// Authorization.Token field will be empty. -// -// clientID is the OAuth Client ID with which to create the token. -// -// GitHub API docs: -// https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app -// https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint -func (s *AuthorizationsService) GetOrCreateForApp(ctx context.Context, clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) { - var u string - if auth.Fingerprint == nil || *auth.Fingerprint == "" { - u = fmt.Sprintf("authorizations/clients/%v", clientID) - } else { - u = fmt.Sprintf("authorizations/clients/%v/%v", clientID, *auth.Fingerprint) - } - - req, err := s.client.NewRequest("PUT", u, auth) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Edit a single authorization. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization -func (s *AuthorizationsService) Edit(ctx context.Context, id int64, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) { - u := fmt.Sprintf("authorizations/%d", id) - - req, err := s.client.NewRequest("PATCH", u, auth) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Delete a single authorization. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization -func (s *AuthorizationsService) Delete(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("authorizations/%d", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Check if an OAuth token is valid for a specific app. 
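Because this service only accepts HTTP Basic Authentication, a typical Create call wraps the client in the package's BasicAuthTransport (referenced in doc.go further down; its Username/Password fields and Client() helper are assumed here, not shown in this hunk). A minimal sketch:

    import (
        "context"

        "github.com/google/go-github/github"
    )

    func createToken(ctx context.Context, username, password string) (string, error) {
        tp := github.BasicAuthTransport{Username: username, Password: password}
        client := github.NewClient(tp.Client())

        a, _, err := client.Authorizations.Create(ctx, &github.AuthorizationRequest{
            Scopes: []github.Scope{github.ScopeRepo, github.ScopeGist},
            Note:   github.String("my CLI tool"),
        })
        if err != nil {
            return "", err
        }
        // The token value is only returned on creation; store it now.
        return a.GetToken(), nil
    }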
-// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// The returned Authorization.User field will be populated. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#check-an-authorization -func (s *AuthorizationsService) Check(ctx context.Context, clientID string, token string) (*Authorization, *Response, error) { - u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Reset is used to reset a valid OAuth token without end user involvement. -// Applications must save the "token" property in the response, because changes -// take effect immediately. -// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// The returned Authorization.User field will be populated. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#reset-an-authorization -func (s *AuthorizationsService) Reset(ctx context.Context, clientID string, token string) (*Authorization, *Response, error) { - u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Revoke an authorization for an application. -// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#revoke-an-authorization-for-an-application -func (s *AuthorizationsService) Revoke(ctx context.Context, clientID string, token string) (*Response, error) { - u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListGrants lists the set of OAuth applications that have been granted -// access to a user's account. This will return one entry for each application -// that has been granted access to the account, regardless of the number of -// tokens an application has generated for the user. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants -func (s *AuthorizationsService) ListGrants(ctx context.Context, opt *ListOptions) ([]*Grant, *Response, error) { - u, err := addOptions("applications/grants", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - grants := []*Grant{} - resp, err := s.client.Do(ctx, req, &grants) - if err != nil { - return nil, resp, err - } - - return grants, resp, nil -} - -// GetGrant gets a single OAuth application grant. 
-// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-grant -func (s *AuthorizationsService) GetGrant(ctx context.Context, id int64) (*Grant, *Response, error) { - u := fmt.Sprintf("applications/grants/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - grant := new(Grant) - resp, err := s.client.Do(ctx, req, grant) - if err != nil { - return nil, resp, err - } - - return grant, resp, nil -} - -// DeleteGrant deletes an OAuth application grant. Deleting an application's -// grant will also delete all OAuth tokens associated with the application for -// the user. -// -// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-a-grant -func (s *AuthorizationsService) DeleteGrant(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("applications/grants/%d", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CreateImpersonation creates an impersonation OAuth token. -// -// This requires admin permissions. With the returned Authorization.Token -// you can e.g. create or delete a user's public SSH key. NOTE: creating a -// new token automatically revokes an existing one. -// -// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#create-an-impersonation-oauth-token -func (s *AuthorizationsService) CreateImpersonation(ctx context.Context, username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) { - u := fmt.Sprintf("admin/users/%v/authorizations", username) - req, err := s.client.NewRequest("POST", u, authReq) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - return a, resp, nil -} - -// DeleteImpersonation deletes an impersonation OAuth token. -// -// NOTE: there can be only one at a time. -// -// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#delete-an-impersonation-oauth-token -func (s *AuthorizationsService) DeleteImpersonation(ctx context.Context, username string) (*Response, error) { - u := fmt.Sprintf("admin/users/%v/authorizations", username) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/checks.go b/vendor/github.com/google/go-github/github/checks.go deleted file mode 100644 index f27d40d4d33c..000000000000 --- a/vendor/github.com/google/go-github/github/checks.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ChecksService provides access to the Checks API in the -// GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/checks/ -type ChecksService service - -// CheckRun represents a GitHub check run on a repository associated with a GitHub app. 
-type CheckRun struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - ExternalID *string `json:"external_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DetailsURL *string `json:"details_url,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` - Output *CheckRunOutput `json:"output,omitempty"` - Name *string `json:"name,omitempty"` - CheckSuite *CheckSuite `json:"check_suite,omitempty"` - App *App `json:"app,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` -} - -// CheckRunOutput represents the output of a CheckRun. -type CheckRunOutput struct { - Title *string `json:"title,omitempty"` - Summary *string `json:"summary,omitempty"` - Text *string `json:"text,omitempty"` - AnnotationsCount *int `json:"annotations_count,omitempty"` - AnnotationsURL *string `json:"annotations_url,omitempty"` - Annotations []*CheckRunAnnotation `json:"annotations,omitempty"` - Images []*CheckRunImage `json:"images,omitempty"` -} - -// CheckRunAnnotation represents an annotation object for a CheckRun output. -type CheckRunAnnotation struct { - Path *string `json:"path,omitempty"` - BlobHRef *string `json:"blob_href,omitempty"` - StartLine *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - AnnotationLevel *string `json:"annotation_level,omitempty"` - Message *string `json:"message,omitempty"` - Title *string `json:"title,omitempty"` - RawDetails *string `json:"raw_details,omitempty"` -} - -// CheckRunImage represents an image object for a CheckRun output. -type CheckRunImage struct { - Alt *string `json:"alt,omitempty"` - ImageURL *string `json:"image_url,omitempty"` - Caption *string `json:"caption,omitempty"` -} - -// CheckSuite represents a suite of check runs. -type CheckSuite struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - URL *string `json:"url,omitempty"` - BeforeSHA *string `json:"before,omitempty"` - AfterSHA *string `json:"after,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - App *App `json:"app,omitempty"` - Repository *Repository `json:"repository,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` -} - -func (c CheckRun) String() string { - return Stringify(c) -} - -func (c CheckSuite) String() string { - return Stringify(c) -} - -// GetCheckRun gets a check-run for a repository. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#get-a-single-check-run -func (s *ChecksService) GetCheckRun(ctx context.Context, owner, repo string, checkRunID int64) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// GetCheckSuite gets a single check suite. 
-// -// GitHub API docs: https://developer.github.com/v3/checks/suites/#get-a-single-check-suite -func (s *ChecksService) GetCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*CheckSuite, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v", owner, repo, checkSuiteID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkSuite := new(CheckSuite) - resp, err := s.client.Do(ctx, req, checkSuite) - if err != nil { - return nil, resp, err - } - - return checkSuite, resp, nil -} - -// CreateCheckRunOptions sets up parameters needed to create a CheckRun. -type CreateCheckRunOptions struct { - Name string `json:"name"` // The name of the check (e.g., "code-coverage"). (Required.) - HeadBranch string `json:"head_branch"` // The name of the branch to perform a check against. (Required.) - HeadSHA string `json:"head_sha"` // The SHA of the commit. (Required.) - DetailsURL *string `json:"details_url,omitempty"` // The URL of the integrator's site that has the full details of the check. (Optional.) - ExternalID *string `json:"external_id,omitempty"` // A reference for the run on the integrator's system. (Optional.) - Status *string `json:"status,omitempty"` // The current status. Can be one of "queued", "in_progress", or "completed". Default: "queued". (Optional.) - Conclusion *string `json:"conclusion,omitempty"` // Can be one of "success", "failure", "neutral", "cancelled", "timed_out", or "action_required". (Optional. Required if you provide a status of "completed".) - StartedAt *Timestamp `json:"started_at,omitempty"` // The time that the check run began. (Optional.) - CompletedAt *Timestamp `json:"completed_at,omitempty"` // The time the check completed. (Optional. Required if you provide conclusion.) - Output *CheckRunOutput `json:"output,omitempty"` // Provide descriptive details about the run. (Optional) - Actions []*CheckRunAction `json:"actions,omitempty"` // Possible further actions the integrator can perform, which a user may trigger. (Optional.) -} - -// CheckRunAction exposes further actions the integrator can perform, which a user may trigger. -type CheckRunAction struct { - Label string `json:"label"` // The text to be displayed on a button in the web UI. The maximum size is 20 characters. (Required.) - Description string `json:"description"` // A short explanation of what this action would do. The maximum size is 40 characters. (Required.) - Identifier string `json:"identifier"` // A reference for the action on the integrator's system. The maximum size is 20 characters. (Required.) -} - -// CreateCheckRun creates a check run for repository. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#create-a-check-run -func (s *ChecksService) CreateCheckRun(ctx context.Context, owner, repo string, opt CreateCheckRunOptions) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs", owner, repo) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// UpdateCheckRunOptions sets up parameters needed to update a CheckRun. -type UpdateCheckRunOptions struct { - Name string `json:"name"` // The name of the check (e.g., "code-coverage"). (Required.) 
- HeadBranch *string `json:"head_branch,omitempty"` // The name of the branch to perform a check against. (Optional.) - HeadSHA *string `json:"head_sha,omitempty"` // The SHA of the commit. (Optional.) - DetailsURL *string `json:"details_url,omitempty"` // The URL of the integrator's site that has the full details of the check. (Optional.) - ExternalID *string `json:"external_id,omitempty"` // A reference for the run on the integrator's system. (Optional.) - Status *string `json:"status,omitempty"` // The current status. Can be one of "queued", "in_progress", or "completed". Default: "queued". (Optional.) - Conclusion *string `json:"conclusion,omitempty"` // Can be one of "success", "failure", "neutral", "cancelled", "timed_out", or "action_required". (Optional. Required if you provide a status of "completed".) - CompletedAt *Timestamp `json:"completed_at,omitempty"` // The time the check completed. (Optional. Required if you provide conclusion.) - Output *CheckRunOutput `json:"output,omitempty"` // Provide descriptive details about the run. (Optional) - Actions []*CheckRunAction `json:"actions,omitempty"` // Possible further actions the integrator can perform, which a user may trigger. (Optional.) -} - -// UpdateCheckRun updates a check run for a specific commit in a repository. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#update-a-check-run -func (s *ChecksService) UpdateCheckRun(ctx context.Context, owner, repo string, checkRunID int64, opt UpdateCheckRunOptions) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) - req, err := s.client.NewRequest("PATCH", u, opt) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// ListCheckRunAnnotations lists the annotations for a check run. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#list-annotations-for-a-check-run -func (s *ChecksService) ListCheckRunAnnotations(ctx context.Context, owner, repo string, checkRunID int64, opt *ListOptions) ([]*CheckRunAnnotation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v/annotations", owner, repo, checkRunID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunAnnotations []*CheckRunAnnotation - resp, err := s.client.Do(ctx, req, &checkRunAnnotations) - if err != nil { - return nil, resp, err - } - - return checkRunAnnotations, resp, nil -} - -// ListCheckRunsOptions represents parameters to list check runs. -type ListCheckRunsOptions struct { - CheckName *string `url:"check_name,omitempty"` // Returns check runs with the specified name. - Status *string `url:"status,omitempty"` // Returns check runs with the specified status. Can be one of "queued", "in_progress", or "completed". - Filter *string `url:"filter,omitempty"` // Filters check runs by their completed_at timestamp. Can be one of "latest" (returning the most recent check runs) or "all". Default: "latest" - - ListOptions -} - -// ListCheckRunsResults represents the result of a check run list. 
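Together, CreateCheckRun and UpdateCheckRun cover the usual integrator lifecycle: open the run as "in_progress" when work begins, then patch in a conclusion and output when it finishes. A sketch with placeholder owner/repo/branch/SHA values; client.Checks, the github.String helper, and Timestamp wrapping time.Time are assumed from elsewhere in the package.

    import (
        "context"
        "time"

        "github.com/google/go-github/github"
    )

    func reportCoverage(ctx context.Context, client *github.Client, owner, repo, branch, sha string) error {
        run, _, err := client.Checks.CreateCheckRun(ctx, owner, repo, github.CreateCheckRunOptions{
            Name:       "code-coverage",
            HeadBranch: branch,
            HeadSHA:    sha,
            Status:     github.String("in_progress"),
        })
        if err != nil {
            return err
        }

        // ... run the actual analysis here, then report the result ...

        _, _, err = client.Checks.UpdateCheckRun(ctx, owner, repo, run.GetID(), github.UpdateCheckRunOptions{
            Name:        "code-coverage",
            Status:      github.String("completed"),
            Conclusion:  github.String("success"),
            CompletedAt: &github.Timestamp{Time: time.Now()},
            Output: &github.CheckRunOutput{
                Title:   github.String("Coverage report"),
                Summary: github.String("87% of statements covered"),
            },
        })
        return err
    }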
-type ListCheckRunsResults struct { - Total *int `json:"total_count,omitempty"` - CheckRuns []*CheckRun `json:"check_runs,omitempty"` -} - -// ListCheckRunsForRef lists check runs for a specific ref. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#list-check-runs-for-a-specific-ref -func (s *ChecksService) ListCheckRunsForRef(ctx context.Context, owner, repo, ref string, opt *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/check-runs", owner, repo, ref) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunResults *ListCheckRunsResults - resp, err := s.client.Do(ctx, req, &checkRunResults) - if err != nil { - return nil, resp, err - } - - return checkRunResults, resp, nil -} - -// ListCheckRunsCheckSuite lists check runs for a check suite. -// -// GitHub API docs: https://developer.github.com/v3/checks/runs/#list-check-runs-in-a-check-suite -func (s *ChecksService) ListCheckRunsCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64, opt *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v/check-runs", owner, repo, checkSuiteID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunResults *ListCheckRunsResults - resp, err := s.client.Do(ctx, req, &checkRunResults) - if err != nil { - return nil, resp, err - } - - return checkRunResults, resp, nil -} - -// ListCheckSuiteOptions represents parameters to list check suites. -type ListCheckSuiteOptions struct { - CheckName *string `url:"check_name,omitempty"` // Filters checks suites by the name of the check run. - AppID *int `url:"app_id,omitempty"` // Filters check suites by GitHub App id. - - ListOptions -} - -// ListCheckSuiteResults represents the result of a check run list. -type ListCheckSuiteResults struct { - Total *int `json:"total_count,omitempty"` - CheckSuites []*CheckSuite `json:"check_suites,omitempty"` -} - -// ListCheckSuitesForRef lists check suite for a specific ref. -// -// GitHub API docs: https://developer.github.com/v3/checks/suites/#list-check-suites-for-a-specific-ref -func (s *ChecksService) ListCheckSuitesForRef(ctx context.Context, owner, repo, ref string, opt *ListCheckSuiteOptions) (*ListCheckSuiteResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/check-suites", owner, repo, ref) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkSuiteResults *ListCheckSuiteResults - resp, err := s.client.Do(ctx, req, &checkSuiteResults) - if err != nil { - return nil, resp, err - } - - return checkSuiteResults, resp, nil -} - -// AutoTriggerCheck enables or disables automatic creation of CheckSuite events upon pushes to the repository. -type AutoTriggerCheck struct { - AppID *int64 `json:"app_id,omitempty"` // The id of the GitHub App. (Required.) 
- Setting *bool `json:"setting,omitempty"` // Set to "true" to enable automatic creation of CheckSuite events upon pushes to the repository, or "false" to disable them. Default: "true" (Required.) -} - -// CheckSuitePreferenceOptions set options for check suite preferences for a repository. -type CheckSuitePreferenceOptions struct { - PreferenceList *PreferenceList `json:"auto_trigger_checks,omitempty"` // A list of auto trigger checks that can be set for a check suite in a repository. -} - -// CheckSuitePreferenceResults represents the results of the preference set operation. -type CheckSuitePreferenceResults struct { - Preferences *PreferenceList `json:"preferences,omitempty"` - Repository *Repository `json:"repository,omitempty"` -} - -// PreferenceList represents a list of auto trigger checks for repository -type PreferenceList struct { - AutoTriggerChecks []*AutoTriggerCheck `json:"auto_trigger_checks,omitempty"` // A slice of auto trigger checks that can be set for a check suite in a repository. -} - -// SetCheckSuitePreferences changes the default automatic flow when creating check suites. -// -// GitHub API docs: https://developer.github.com/v3/checks/suites/#set-preferences-for-check-suites-on-a-repository -func (s *ChecksService) SetCheckSuitePreferences(ctx context.Context, owner, repo string, opt CheckSuitePreferenceOptions) (*CheckSuitePreferenceResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/preferences", owner, repo) - req, err := s.client.NewRequest("PATCH", u, opt) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkSuitePrefResults *CheckSuitePreferenceResults - resp, err := s.client.Do(ctx, req, &checkSuitePrefResults) - if err != nil { - return nil, resp, err - } - - return checkSuitePrefResults, resp, nil -} - -// CreateCheckSuiteOptions sets up parameters to manually create a check suites -type CreateCheckSuiteOptions struct { - HeadSHA string `json:"head_sha"` // The sha of the head commit. (Required.) - HeadBranch *string `json:"head_branch,omitempty"` // The name of the head branch where the code changes are implemented. -} - -// CreateCheckSuite manually creates a check suite for a repository. -// -// GitHub API docs: https://developer.github.com/v3/checks/suites/#create-a-check-suite -func (s *ChecksService) CreateCheckSuite(ctx context.Context, owner, repo string, opt CreateCheckSuiteOptions) (*CheckSuite, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites", owner, repo) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkSuite := new(CheckSuite) - resp, err := s.client.Do(ctx, req, checkSuite) - if err != nil { - return nil, resp, err - } - - return checkSuite, resp, nil -} - -// ReRequestCheckSuite triggers GitHub to rerequest an existing check suite, without pushing new code to a repository. 
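A common read-side use of the listing calls above is to fetch the latest completed runs for a commit and inspect their conclusions, for example before promoting a build. A sketch using ListCheckRunsForRef, with the usual generated accessors assumed:

    import (
        "context"
        "fmt"

        "github.com/google/go-github/github"
    )

    func printConclusions(ctx context.Context, client *github.Client, owner, repo, ref string) error {
        opt := &github.ListCheckRunsOptions{
            Status:      github.String("completed"),
            Filter:      github.String("latest"),
            ListOptions: github.ListOptions{PerPage: 50},
        }
        results, _, err := client.Checks.ListCheckRunsForRef(ctx, owner, repo, ref, opt)
        if err != nil {
            return err
        }
        for _, cr := range results.CheckRuns {
            fmt.Printf("%s: %s\n", cr.GetName(), cr.GetConclusion())
        }
        return nil
    }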
-// -// GitHub API docs: https://developer.github.com/v3/checks/suites/#rerequest-check-suite -func (s *ChecksService) ReRequestCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v/rerequest", owner, repo, checkSuiteID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - resp, err := s.client.Do(ctx, req, nil) - return resp, err -} diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go deleted file mode 100644 index 96445d2648a0..000000000000 --- a/vendor/github.com/google/go-github/github/doc.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package github provides a client for using the GitHub API. - -Usage: - - import "github.com/google/go-github/github" - -Construct a new GitHub client, then use the various services on the client to -access different parts of the GitHub API. For example: - - client := github.NewClient(nil) - - // list all organizations for user "willnorris" - orgs, _, err := client.Organizations.List(ctx, "willnorris", nil) - -Some API methods have optional parameters that can be passed. For example: - - client := github.NewClient(nil) - - // list public repositories for org "github" - opt := &github.RepositoryListByOrgOptions{Type: "public"} - repos, _, err := client.Repositories.ListByOrg(ctx, "github", opt) - -The services of a client divide the API into logical chunks and correspond to -the structure of the GitHub API documentation at -https://developer.github.com/v3/. - -NOTE: Using the https://godoc.org/context package, one can easily -pass cancelation signals and deadlines to various services of the client for -handling a request. In case there is no context available, then context.Background() -can be used as a starting point. - -For more sample code snippets, head over to the https://github.com/google/go-github/tree/master/example directory. - -Authentication - -The go-github library does not directly handle authentication. Instead, when -creating a new client, pass an http.Client that can handle authentication for -you. The easiest and recommended way to do this is using the golang.org/x/oauth2 -library, but you can always use any other library that provides an http.Client. -If you have an OAuth2 access token (for example, a personal API token), you can -use it with the oauth2 library using: - - import "golang.org/x/oauth2" - - func main() { - ctx := context.Background() - ts := oauth2.StaticTokenSource( - &oauth2.Token{AccessToken: "... your access token ..."}, - ) - tc := oauth2.NewClient(ctx, ts) - - client := github.NewClient(tc) - - // list all repositories for the authenticated user - repos, _, err := client.Repositories.List(ctx, "", nil) - } - -Note that when using an authenticated Client, all calls made by the client will -include the specified OAuth token. Therefore, authenticated clients should -almost never be shared between different users. - -See the oauth2 docs for complete instructions on using that library. - -For API methods that require HTTP Basic Authentication, use the -BasicAuthTransport. - -GitHub Apps authentication can be provided by the -https://github.com/bradleyfalzon/ghinstallation package. 
- - import "github.com/bradleyfalzon/ghinstallation" - - func main() { - // Wrap the shared transport for use with the integration ID 1 authenticating with installation ID 99. - itr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, 1, 99, "2016-10-19.private-key.pem") - if err != nil { - // Handle error. - } - - // Use installation transport with client - client := github.NewClient(&http.Client{Transport: itr}) - - // Use client... - } - -Rate Limiting - -GitHub imposes a rate limit on all API clients. Unauthenticated clients are -limited to 60 requests per hour, while authenticated clients can make up to -5,000 requests per hour. The Search API has a custom rate limit. Unauthenticated -clients are limited to 10 requests per minute, while authenticated clients -can make up to 30 requests per minute. To receive the higher rate limit when -making calls that are not issued on behalf of a user, -use UnauthenticatedRateLimitedTransport. - -The returned Response.Rate value contains the rate limit information -from the most recent API call. If a recent enough response isn't -available, you can use RateLimits to fetch the most up-to-date rate -limit data for the client. - -To detect an API rate limit error, you can check if its type is *github.RateLimitError: - - repos, _, err := client.Repositories.List(ctx, "", nil) - if _, ok := err.(*github.RateLimitError); ok { - log.Println("hit rate limit") - } - -Learn more about GitHub rate limiting at -https://developer.github.com/v3/#rate-limiting. - -Accepted Status - -Some endpoints may return a 202 Accepted status code, meaning that the -information required is not yet ready and was scheduled to be gathered on -the GitHub side. Methods known to behave like this are documented specifying -this behavior. - -To detect this condition of error, you can check if its type is -*github.AcceptedError: - - stats, _, err := client.Repositories.ListContributorsStats(ctx, org, repo) - if _, ok := err.(*github.AcceptedError); ok { - log.Println("scheduled on GitHub side") - } - -Conditional Requests - -The GitHub API has good support for conditional requests which will help -prevent you from burning through your rate limit, as well as help speed up your -application. go-github does not handle conditional requests directly, but is -instead designed to work with a caching http.Transport. We recommend using -https://github.com/gregjones/httpcache for that. - -Learn more about GitHub conditional requests at -https://developer.github.com/v3/#conditional-requests. - -Creating and Updating Resources - -All structs for GitHub resources use pointer values for all non-repeated fields. -This allows distinguishing between unset fields and those set to a zero-value. -Helper functions have been provided to easily create these pointers for string, -bool, and int values. For example: - - // create a new private repository named "foo" - repo := &github.Repository{ - Name: github.String("foo"), - Private: github.Bool(true), - } - client.Repositories.Create(ctx, "", repo) - -Users who have worked with protocol buffers should find this pattern familiar. - -Pagination - -All requests for resource collections (repos, pull requests, issues, etc.) -support pagination. Pagination options are described in the -github.ListOptions struct and passed to the list methods directly or as an -embedded type of a more specific list options struct (for example -github.PullRequestListOptions). Pages information is available via the -github.Response struct. 
- - client := github.NewClient(nil) - - opt := &github.RepositoryListByOrgOptions{ - ListOptions: github.ListOptions{PerPage: 10}, - } - // get all pages of results - var allRepos []*github.Repository - for { - repos, resp, err := client.Repositories.ListByOrg(ctx, "github", opt) - if err != nil { - return err - } - allRepos = append(allRepos, repos...) - if resp.NextPage == 0 { - break - } - opt.Page = resp.NextPage - } - -*/ -package github diff --git a/vendor/github.com/google/go-github/github/event.go b/vendor/github.com/google/go-github/github/event.go deleted file mode 100644 index 04c8845a1f77..000000000000 --- a/vendor/github.com/google/go-github/github/event.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "encoding/json" - "time" -) - -// Event represents a GitHub event. -type Event struct { - Type *string `json:"type,omitempty"` - Public *bool `json:"public,omitempty"` - RawPayload *json.RawMessage `json:"payload,omitempty"` - Repo *Repository `json:"repo,omitempty"` - Actor *User `json:"actor,omitempty"` - Org *Organization `json:"org,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - ID *string `json:"id,omitempty"` -} - -func (e Event) String() string { - return Stringify(e) -} - -// ParsePayload parses the event payload. For recognized event types, -// a value of the corresponding struct type will be returned. -func (e *Event) ParsePayload() (payload interface{}, err error) { - switch *e.Type { - case "CheckRunEvent": - payload = &CheckRunEvent{} - case "CheckSuiteEvent": - payload = &CheckSuiteEvent{} - case "CommitCommentEvent": - payload = &CommitCommentEvent{} - case "CreateEvent": - payload = &CreateEvent{} - case "DeleteEvent": - payload = &DeleteEvent{} - case "DeploymentEvent": - payload = &DeploymentEvent{} - case "DeploymentStatusEvent": - payload = &DeploymentStatusEvent{} - case "ForkEvent": - payload = &ForkEvent{} - case "GitHubAppAuthorizationEvent": - payload = &GitHubAppAuthorizationEvent{} - case "GollumEvent": - payload = &GollumEvent{} - case "InstallationEvent": - payload = &InstallationEvent{} - case "InstallationRepositoriesEvent": - payload = &InstallationRepositoriesEvent{} - case "IssueCommentEvent": - payload = &IssueCommentEvent{} - case "IssuesEvent": - payload = &IssuesEvent{} - case "LabelEvent": - payload = &LabelEvent{} - case "MarketplacePurchaseEvent": - payload = &MarketplacePurchaseEvent{} - case "MemberEvent": - payload = &MemberEvent{} - case "MembershipEvent": - payload = &MembershipEvent{} - case "MilestoneEvent": - payload = &MilestoneEvent{} - case "OrganizationEvent": - payload = &OrganizationEvent{} - case "OrgBlockEvent": - payload = &OrgBlockEvent{} - case "PageBuildEvent": - payload = &PageBuildEvent{} - case "PingEvent": - payload = &PingEvent{} - case "ProjectEvent": - payload = &ProjectEvent{} - case "ProjectCardEvent": - payload = &ProjectCardEvent{} - case "ProjectColumnEvent": - payload = &ProjectColumnEvent{} - case "PublicEvent": - payload = &PublicEvent{} - case "PullRequestEvent": - payload = &PullRequestEvent{} - case "PullRequestReviewEvent": - payload = &PullRequestReviewEvent{} - case "PullRequestReviewCommentEvent": - payload = &PullRequestReviewCommentEvent{} - case "PushEvent": - payload = &PushEvent{} - case "ReleaseEvent": - payload = &ReleaseEvent{} - case "RepositoryEvent": - payload = &RepositoryEvent{} 
- case "RepositoryVulnerabilityAlertEvent": - payload = &RepositoryVulnerabilityAlertEvent{} - case "StatusEvent": - payload = &StatusEvent{} - case "TeamEvent": - payload = &TeamEvent{} - case "TeamAddEvent": - payload = &TeamAddEvent{} - case "WatchEvent": - payload = &WatchEvent{} - } - err = json.Unmarshal(*e.RawPayload, &payload) - return payload, err -} - -// Payload returns the parsed event payload. For recognized event types, -// a value of the corresponding struct type will be returned. -// -// Deprecated: Use ParsePayload instead, which returns an error -// rather than panics if JSON unmarshaling raw payload fails. -func (e *Event) Payload() (payload interface{}) { - var err error - payload, err = e.ParsePayload() - if err != nil { - panic(err) - } - return payload -} diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go deleted file mode 100644 index 0d70a474c007..000000000000 --- a/vendor/github.com/google/go-github/github/event_types.go +++ /dev/null @@ -1,833 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// These event types are shared between the Events API and used as Webhook payloads. - -package github - -// RequestedAction is included in a CheckRunEvent when a user has invoked an action, -// i.e. when the CheckRunEvent's Action field is "requested_action". -type RequestedAction struct { - Identifier string `json:"identifier"` // The integrator reference of the action requested by the user. -} - -// CheckRunEvent is triggered when a check run is "created", "updated", or "re-requested". -// The Webhook event name is "check_run". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#checkrunevent -type CheckRunEvent struct { - CheckRun *CheckRun `json:"check_run,omitempty"` - // The action performed. Can be "created", "updated", "rerequested" or "requested_action". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The action requested by the user. Populated when the Action is "requested_action". - RequestedAction *RequestedAction `json:"requested_action,omitempty"` // -} - -// CheckSuiteEvent is triggered when a check suite is "completed", "requested", or "re-requested". -// The Webhook event name is "check_suite". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#checksuiteevent -type CheckSuiteEvent struct { - CheckSuite *CheckSuite `json:"check_suite,omitempty"` - // The action performed. Can be "completed", "requested" or "re-requested". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CommitCommentEvent is triggered when a commit comment is created. -// The Webhook event name is "commit_comment". 
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#commitcommentevent -type CommitCommentEvent struct { - Comment *RepositoryComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Action *string `json:"action,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CreateEvent represents a created repository, branch, or tag. -// The Webhook event name is "create". -// -// Note: webhooks will not receive this event for created repositories. -// Additionally, webhooks will not receive this event for tags if more -// than three tags are pushed at once. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#createevent -type CreateEvent struct { - Ref *string `json:"ref,omitempty"` - // RefType is the object that was created. Possible values are: "repository", "branch", "tag". - RefType *string `json:"ref_type,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - Description *string `json:"description,omitempty"` - - // The following fields are only populated by Webhook events. - PusherType *string `json:"pusher_type,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeleteEvent represents a deleted branch or tag. -// The Webhook event name is "delete". -// -// Note: webhooks will not receive this event for tags if more than three tags -// are deleted at once. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deleteevent -type DeleteEvent struct { - Ref *string `json:"ref,omitempty"` - // RefType is the object that was deleted. Possible values are: "branch", "tag". - RefType *string `json:"ref_type,omitempty"` - - // The following fields are only populated by Webhook events. - PusherType *string `json:"pusher_type,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeploymentEvent represents a deployment. -// The Webhook event name is "deployment". -// -// Events of this type are not visible in timelines, they are only used to trigger hooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deploymentevent -type DeploymentEvent struct { - Deployment *Deployment `json:"deployment,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeploymentStatusEvent represents a deployment status. -// The Webhook event name is "deployment_status". -// -// Events of this type are not visible in timelines, they are only used to trigger hooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deploymentstatusevent -type DeploymentStatusEvent struct { - Deployment *Deployment `json:"deployment,omitempty"` - DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// ForkEvent is triggered when a user forks a repository. 
-// The Webhook event name is "fork". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#forkevent -type ForkEvent struct { - // Forkee is the created repository. - Forkee *Repository `json:"forkee,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// GitHubAppAuthorizationEvent is triggered when a user's authorization for a -// GitHub Application is revoked. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#githubappauthorizationevent -type GitHubAppAuthorizationEvent struct { - // The action performed. Can be "revoked". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` -} - -// Page represents a single Wiki page. -type Page struct { - PageName *string `json:"page_name,omitempty"` - Title *string `json:"title,omitempty"` - Summary *string `json:"summary,omitempty"` - Action *string `json:"action,omitempty"` - SHA *string `json:"sha,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` -} - -// GollumEvent is triggered when a Wiki page is created or updated. -// The Webhook event name is "gollum". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#gollumevent -type GollumEvent struct { - Pages []*Page `json:"pages,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// EditChange represents the changes when an issue, pull request, or comment has -// been edited. -type EditChange struct { - Title *struct { - From *string `json:"from,omitempty"` - } `json:"title,omitempty"` - Body *struct { - From *string `json:"from,omitempty"` - } `json:"body,omitempty"` -} - -// ProjectChange represents the changes when a project has been edited. -type ProjectChange struct { - Name *struct { - From *string `json:"from,omitempty"` - } `json:"name,omitempty"` - Body *struct { - From *string `json:"from,omitempty"` - } `json:"body,omitempty"` -} - -// ProjectCardChange represents the changes when a project card has been edited. -type ProjectCardChange struct { - Note *struct { - From *string `json:"from,omitempty"` - } `json:"note,omitempty"` -} - -// ProjectColumnChange represents the changes when a project column has been edited. -type ProjectColumnChange struct { - Name *struct { - From *string `json:"from,omitempty"` - } `json:"name,omitempty"` -} - -// TeamChange represents the changes when a team has been edited. -type TeamChange struct { - Description *struct { - From *string `json:"from,omitempty"` - } `json:"description,omitempty"` - Name *struct { - From *string `json:"from,omitempty"` - } `json:"name,omitempty"` - Privacy *struct { - From *string `json:"from,omitempty"` - } `json:"privacy,omitempty"` - Repository *struct { - Permissions *struct { - From *struct { - Admin *bool `json:"admin,omitempty"` - Pull *bool `json:"pull,omitempty"` - Push *bool `json:"push,omitempty"` - } `json:"from,omitempty"` - } `json:"permissions,omitempty"` - } `json:"repository,omitempty"` -} - -// InstallationEvent is triggered when a GitHub App has been installed or uninstalled. -// The Webhook event name is "installation". 
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#installationevent -type InstallationEvent struct { - // The action that was performed. Can be either "created" or "deleted". - Action *string `json:"action,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// InstallationRepositoriesEvent is triggered when a repository is added or -// removed from an installation. The Webhook event name is "installation_repositories". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#installationrepositoriesevent -type InstallationRepositoriesEvent struct { - // The action that was performed. Can be either "added" or "removed". - Action *string `json:"action,omitempty"` - RepositoriesAdded []*Repository `json:"repositories_added,omitempty"` - RepositoriesRemoved []*Repository `json:"repositories_removed,omitempty"` - RepositorySelection *string `json:"repository_selection,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// IssueCommentEvent is triggered when an issue comment is created on an issue -// or pull request. -// The Webhook event name is "issue_comment". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#issuecommentevent -type IssueCommentEvent struct { - // Action is the action that was performed on the comment. - // Possible values are: "created", "edited", "deleted". - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Comment *IssueComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// IssuesEvent is triggered when an issue is assigned, unassigned, labeled, -// unlabeled, opened, closed, or reopened. -// The Webhook event name is "issues". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#issuesevent -type IssuesEvent struct { - // Action is the action that was performed. Possible values are: "assigned", - // "unassigned", "labeled", "unlabeled", "opened", "closed", "reopened", "edited". - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Label *Label `json:"label,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// LabelEvent is triggered when a repository's label is created, edited, or deleted. -// The Webhook event name is "label" -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#labelevent -type LabelEvent struct { - // Action is the action that was performed. Possible values are: - // "created", "edited", "deleted" - Action *string `json:"action,omitempty"` - Label *Label `json:"label,omitempty"` - - // The following fields are only populated by Webhook events. 
- Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MarketplacePurchaseEvent is triggered when a user purchases, cancels, or changes -// their GitHub Marketplace plan. -// Webhook event name "marketplace_purchase". -// -// Github API docs: https://developer.github.com/v3/activity/events/types/#marketplacepurchaseevent -type MarketplacePurchaseEvent struct { - // Action is the action that was performed. Possible values are: - // "purchased", "cancelled", "pending_change", "pending_change_cancelled", "changed". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - EffectiveDate *Timestamp `json:"effective_date,omitempty"` - MarketplacePurchase *MarketplacePurchase `json:"marketplace_purchase,omitempty"` - PreviousMarketplacePurchase *MarketplacePurchase `json:"previous_marketplace_purchase,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MemberEvent is triggered when a user is added as a collaborator to a repository. -// The Webhook event name is "member". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#memberevent -type MemberEvent struct { - // Action is the action that was performed. Possible value is: "added". - Action *string `json:"action,omitempty"` - Member *User `json:"member,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MembershipEvent is triggered when a user is added or removed from a team. -// The Webhook event name is "membership". -// -// Events of this type are not visible in timelines, they are only used to -// trigger organization webhooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#membershipevent -type MembershipEvent struct { - // Action is the action that was performed. Possible values are: "added", "removed". - Action *string `json:"action,omitempty"` - // Scope is the scope of the membership. Possible value is: "team". - Scope *string `json:"scope,omitempty"` - Member *User `json:"member,omitempty"` - Team *Team `json:"team,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MilestoneEvent is triggered when a milestone is created, closed, opened, edited, or deleted. -// The Webhook event name is "milestone". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#milestoneevent -type MilestoneEvent struct { - // Action is the action that was performed. Possible values are: - // "created", "closed", "opened", "edited", "deleted" - Action *string `json:"action,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - - // The following fields are only populated by Webhook events. 
- Changes *EditChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// OrganizationEvent is triggered when a user is added, removed, or invited to an organization.
-// Events of this type are not visible in timelines. These events are only used to trigger organization hooks.
-// Webhook event name is "organization".
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/types/#organizationevent
-type OrganizationEvent struct {
- // Action is the action that was performed.
- // Can be one of "member_added", "member_removed", or "member_invited".
- Action *string `json:"action,omitempty"`
-
- // Invitation is the invitation for the user or email if the action is "member_invited".
- Invitation *Invitation `json:"invitation,omitempty"`
-
- // Membership is the membership between the user and the organization.
- // Not present when the action is "member_invited".
- Membership *Membership `json:"membership,omitempty"`
-
- Organization *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// OrgBlockEvent is triggered when an organization blocks or unblocks a user.
-// The Webhook event name is "org_block".
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/types/#orgblockevent
-type OrgBlockEvent struct {
- // Action is the action that was performed.
- // Can be "blocked" or "unblocked".
- Action *string `json:"action,omitempty"`
- BlockedUser *User `json:"blocked_user,omitempty"`
- Organization *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// PageBuildEvent represents an attempted build of a GitHub Pages site, whether
-// successful or not.
-// The Webhook event name is "page_build".
-//
-// This event is triggered on push to a GitHub Pages enabled branch (gh-pages
-// for project pages, master for user and organization pages).
-//
-// Events of this type are not visible in timelines, they are only used to trigger hooks.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pagebuildevent
-type PageBuildEvent struct {
- Build *PagesBuild `json:"build,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ID *int64 `json:"id,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// PingEvent is triggered when a Webhook is added to GitHub.
-//
-// GitHub API docs: https://developer.github.com/webhooks/#ping-event
-type PingEvent struct {
- // Random string of GitHub zen.
- Zen *string `json:"zen,omitempty"`
- // The ID of the webhook that triggered the ping.
- HookID *int64 `json:"hook_id,omitempty"`
- // The webhook configuration.
- Hook *Hook `json:"hook,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// ProjectEvent is triggered when project is created, modified or deleted.
-// The webhook event name is "project". 
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectevent -type ProjectEvent struct { - Action *string `json:"action,omitempty"` - Changes *ProjectChange `json:"changes,omitempty"` - Project *Project `json:"project,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// ProjectCardEvent is triggered when a project card is created, updated, moved, converted to an issue, or deleted. -// The webhook event name is "project_card". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectcardevent -type ProjectCardEvent struct { - Action *string `json:"action,omitempty"` - Changes *ProjectCardChange `json:"changes,omitempty"` - AfterID *int64 `json:"after_id,omitempty"` - ProjectCard *ProjectCard `json:"project_card,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// ProjectColumnEvent is triggered when a project column is created, updated, moved, or deleted. -// The webhook event name is "project_column". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectcolumnevent -type ProjectColumnEvent struct { - Action *string `json:"action,omitempty"` - Changes *ProjectColumnChange `json:"changes,omitempty"` - AfterID *int64 `json:"after_id,omitempty"` - ProjectColumn *ProjectColumn `json:"project_column,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PublicEvent is triggered when a private repository is open sourced. -// According to GitHub: "Without a doubt: the best GitHub event." -// The Webhook event name is "public". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#publicevent -type PublicEvent struct { - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PullRequestEvent is triggered when a pull request is assigned, unassigned, -// labeled, unlabeled, opened, closed, reopened, or synchronized. -// The Webhook event name is "pull_request". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestevent -type PullRequestEvent struct { - // Action is the action that was performed. Possible values are: - // "assigned", "unassigned", "review_requested", "review_request_removed", "labeled", "unlabeled", - // "opened", "closed", "reopened", "synchronize", "edited". - // If the action is "closed" and the merged key is false, - // the pull request was closed with unmerged commits. If the action is "closed" - // and the merged key is true, the pull request was merged. 
- Action *string `json:"action,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Number *int `json:"number,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - // RequestedReviewer is populated in "review_requested", "review_request_removed" event deliveries. - // A request affecting multiple reviewers at once is split into multiple - // such event deliveries, each with a single, different RequestedReviewer. - RequestedReviewer *User `json:"requested_reviewer,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Label *Label `json:"label,omitempty"` // Populated in "labeled" event deliveries. - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` -} - -// PullRequestReviewEvent is triggered when a review is submitted on a pull -// request. -// The Webhook event name is "pull_request_review". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent -type PullRequestReviewEvent struct { - // Action is always "submitted". - Action *string `json:"action,omitempty"` - Review *PullRequestReview `json:"review,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` -} - -// PullRequestReviewCommentEvent is triggered when a comment is created on a -// portion of the unified diff of a pull request. -// The Webhook event name is "pull_request_review_comment". -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent -type PullRequestReviewCommentEvent struct { - // Action is the action that was performed on the comment. - // Possible values are: "created", "edited", "deleted". - Action *string `json:"action,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - Comment *PullRequestComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PushEvent represents a git push to a GitHub repository. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pushevent -type PushEvent struct { - PushID *int64 `json:"push_id,omitempty"` - Head *string `json:"head,omitempty"` - Ref *string `json:"ref,omitempty"` - Size *int `json:"size,omitempty"` - Commits []PushEventCommit `json:"commits,omitempty"` - Before *string `json:"before,omitempty"` - DistinctSize *int `json:"distinct_size,omitempty"` - - // The following fields are only populated by Webhook events. 
- After *string `json:"after,omitempty"` - Created *bool `json:"created,omitempty"` - Deleted *bool `json:"deleted,omitempty"` - Forced *bool `json:"forced,omitempty"` - BaseRef *string `json:"base_ref,omitempty"` - Compare *string `json:"compare,omitempty"` - Repo *PushEventRepository `json:"repository,omitempty"` - HeadCommit *PushEventCommit `json:"head_commit,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -func (p PushEvent) String() string { - return Stringify(p) -} - -// PushEventCommit represents a git commit in a GitHub PushEvent. -type PushEventCommit struct { - Message *string `json:"message,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - URL *string `json:"url,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - - // The following fields are only populated by Events API. - SHA *string `json:"sha,omitempty"` - - // The following fields are only populated by Webhook events. - ID *string `json:"id,omitempty"` - TreeID *string `json:"tree_id,omitempty"` - Timestamp *Timestamp `json:"timestamp,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Added []string `json:"added,omitempty"` - Removed []string `json:"removed,omitempty"` - Modified []string `json:"modified,omitempty"` -} - -func (p PushEventCommit) String() string { - return Stringify(p) -} - -// PushEventRepository represents the repo object in a PushEvent payload. -type PushEventRepository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Owner *User `json:"owner,omitempty"` - Private *bool `json:"private,omitempty"` - Description *string `json:"description,omitempty"` - Fork *bool `json:"fork,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Homepage *string `json:"homepage,omitempty"` - Size *int `json:"size,omitempty"` - StargazersCount *int `json:"stargazers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` - Language *string `json:"language,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasDownloads *bool `json:"has_downloads,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - HasPages *bool `json:"has_pages,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - Organization *string `json:"organization,omitempty"` - URL *string `json:"url,omitempty"` - ArchiveURL *string `json:"archive_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` -} - -// PushEventRepoOwner is a basic representation of user/org in a PushEvent payload. -type PushEventRepoOwner struct { - Name *string `json:"name,omitempty"` - Email *string `json:"email,omitempty"` -} - -// ReleaseEvent is triggered when a release is published. -// The Webhook event name is "release". 
-// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#releaseevent -type ReleaseEvent struct { - // Action is the action that was performed. Possible value is: "published". - Action *string `json:"action,omitempty"` - Release *RepositoryRelease `json:"release,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// RepositoryEvent is triggered when a repository is created. -// The Webhook event name is "repository". -// -// Events of this type are not visible in timelines, they are only used to -// trigger organization webhooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#repositoryevent -type RepositoryEvent struct { - // Action is the action that was performed. Possible values are: "created", "deleted", - // "publicized", "privatized". - Action *string `json:"action,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#repositoryvulnerabilityalertevent -type RepositoryVulnerabilityAlertEvent struct { - // Action is the action that was performed. This can be: "create", "dismiss", "resolve". - Action *string `json:"action,omitempty"` - - //The security alert of the vulnerable dependency. - Alert *struct { - ID *int64 `json:"id,omitempty"` - AffectedRange *string `json:"affected_range,omitempty"` - AffectedPackageName *string `json:"affected_package_name,omitempty"` - ExternalReference *string `json:"external_reference,omitempty"` - ExternalIdentifier *string `json:"external_identifier,omitempty"` - FixedIn *string `json:"fixed_in,omitempty"` - Dismisser *User `json:"dismisser,omitempty"` - DismissReason *string `json:"dismiss_reason,omitempty"` - DismissedAt *Timestamp `json:"dismissed_at,omitempty"` - } `json:"alert,omitempty"` -} - -// StatusEvent is triggered when the status of a Git commit changes. -// The Webhook event name is "status". -// -// Events of this type are not visible in timelines, they are only used to -// trigger hooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#statusevent -type StatusEvent struct { - SHA *string `json:"sha,omitempty"` - // State is the new state. Possible values are: "pending", "success", "failure", "error". - State *string `json:"state,omitempty"` - Description *string `json:"description,omitempty"` - TargetURL *string `json:"target_url,omitempty"` - Branches []*Branch `json:"branches,omitempty"` - - // The following fields are only populated by Webhook events. 
- ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Context *string `json:"context,omitempty"` - Commit *RepositoryCommit `json:"commit,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// TeamEvent is triggered when an organization's team is created, modified or deleted. -// The Webhook event name is "team". -// -// Events of this type are not visible in timelines. These events are only used -// to trigger hooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#teamevent -type TeamEvent struct { - Action *string `json:"action,omitempty"` - Team *Team `json:"team,omitempty"` - Changes *TeamChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// TeamAddEvent is triggered when a repository is added to a team. -// The Webhook event name is "team_add". -// -// Events of this type are not visible in timelines. These events are only used -// to trigger hooks. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#teamaddevent -type TeamAddEvent struct { - Team *Team `json:"team,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// WatchEvent is related to starring a repository, not watching. See this API -// blog post for an explanation: https://developer.github.com/changes/2012-09-05-watcher-api/ -// -// The event’s actor is the user who starred a repository, and the event’s -// repository is the repository that was starred. -// -// GitHub API docs: https://developer.github.com/v3/activity/events/types/#watchevent -type WatchEvent struct { - // Action is the action that was performed. Possible value is: "started". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} diff --git a/vendor/github.com/google/go-github/github/gen-accessors.go b/vendor/github.com/google/go-github/github/gen-accessors.go deleted file mode 100644 index fe92206fcf86..000000000000 --- a/vendor/github.com/google/go-github/github/gen-accessors.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// gen-accessors generates accessor methods for structs with pointer fields. -// -// It is meant to be used by the go-github authors in conjunction with the -// go generate tool before sending a commit to GitHub. 
-package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "sort" - "strings" - "text/template" -) - -const ( - fileSuffix = "-accessors.go" -) - -var ( - verbose = flag.Bool("v", false, "Print verbose log messages") - - sourceTmpl = template.Must(template.New("source").Parse(source)) - - // blacklistStructMethod lists "struct.method" combos to skip. - blacklistStructMethod = map[string]bool{ - "RepositoryContent.GetContent": true, - "Client.GetBaseURL": true, - "Client.GetUploadURL": true, - "ErrorResponse.GetResponse": true, - "RateLimitError.GetResponse": true, - "AbuseRateLimitError.GetResponse": true, - } - // blacklistStruct lists structs to skip. - blacklistStruct = map[string]bool{ - "Client": true, - } -) - -func logf(fmt string, args ...interface{}) { - if *verbose { - log.Printf(fmt, args...) - } -} - -func main() { - flag.Parse() - fset := token.NewFileSet() - - pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0) - if err != nil { - log.Fatal(err) - return - } - - for pkgName, pkg := range pkgs { - t := &templateData{ - filename: pkgName + fileSuffix, - Year: 2017, - Package: pkgName, - Imports: map[string]string{}, - } - for filename, f := range pkg.Files { - logf("Processing %v...", filename) - if err := t.processAST(f); err != nil { - log.Fatal(err) - } - } - if err := t.dump(); err != nil { - log.Fatal(err) - } - } - logf("Done.") -} - -func (t *templateData) processAST(f *ast.File) error { - for _, decl := range f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok { - continue - } - for _, spec := range gd.Specs { - ts, ok := spec.(*ast.TypeSpec) - if !ok { - continue - } - // Skip unexported identifiers. - if !ts.Name.IsExported() { - logf("Struct %v is unexported; skipping.", ts.Name) - continue - } - // Check if the struct is blacklisted. - if blacklistStruct[ts.Name.Name] { - logf("Struct %v is blacklisted; skipping.", ts.Name) - continue - } - st, ok := ts.Type.(*ast.StructType) - if !ok { - continue - } - for _, field := range st.Fields.List { - se, ok := field.Type.(*ast.StarExpr) - if len(field.Names) == 0 || !ok { - continue - } - - fieldName := field.Names[0] - // Skip unexported identifiers. - if !fieldName.IsExported() { - logf("Field %v is unexported; skipping.", fieldName) - continue - } - // Check if "struct.method" is blacklisted. - if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] { - logf("Method %v is blacklisted; skipping.", key) - continue - } - - switch x := se.X.(type) { - case *ast.ArrayType: - t.addArrayType(x, ts.Name.String(), fieldName.String()) - case *ast.Ident: - t.addIdent(x, ts.Name.String(), fieldName.String()) - case *ast.MapType: - t.addMapType(x, ts.Name.String(), fieldName.String()) - case *ast.SelectorExpr: - t.addSelectorExpr(x, ts.Name.String(), fieldName.String()) - default: - logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x) - } - } - } - } - return nil -} - -func sourceFilter(fi os.FileInfo) bool { - return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix) -} - -func (t *templateData) dump() error { - if len(t.Getters) == 0 { - logf("No getters for %v; skipping.", t.filename) - return nil - } - - // Sort getters by ReceiverType.FieldName. 
- sort.Sort(byName(t.Getters)) - - var buf bytes.Buffer - if err := sourceTmpl.Execute(&buf, t); err != nil { - return err - } - clean, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - - logf("Writing %v...", t.filename) - return ioutil.WriteFile(t.filename, clean, 0644) -} - -func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter { - return &getter{ - sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName), - ReceiverVar: strings.ToLower(receiverType[:1]), - ReceiverType: receiverType, - FieldName: fieldName, - FieldType: fieldType, - ZeroValue: zeroValue, - NamedStruct: namedStruct, - } -} - -func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) { - var eltType string - switch elt := x.Elt.(type) { - case *ast.Ident: - eltType = elt.String() - default: - logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt) - return - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false)) -} - -func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) { - var zeroValue string - var namedStruct = false - switch x.String() { - case "int", "int64": - zeroValue = "0" - case "string": - zeroValue = `""` - case "bool": - zeroValue = "false" - case "Timestamp": - zeroValue = "Timestamp{}" - default: - zeroValue = "nil" - namedStruct = true - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct)) -} - -func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) { - var keyType string - switch key := x.Key.(type) { - case *ast.Ident: - keyType = key.String() - default: - logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key) - return - } - - var valueType string - switch value := x.Value.(type) { - case *ast.Ident: - valueType = value.String() - default: - logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value) - return - } - - fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType) - zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType) - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) -} - -func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) { - if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field. - return - } - - var xX string - if xx, ok := x.X.(*ast.Ident); ok { - xX = xx.String() - } - - switch xX { - case "time", "json": - if xX == "json" { - t.Imports["encoding/json"] = "encoding/json" - } else { - t.Imports[xX] = xX - } - fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name) - zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name) - if xX == "time" && x.Sel.Name == "Duration" { - zeroValue = "0" - } - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) - default: - logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x) - } -} - -type templateData struct { - filename string - Year int - Package string - Imports map[string]string - Getters []*getter -} - -type getter struct { - sortVal string // Lower-case version of "ReceiverType.FieldName". - ReceiverVar string // The one-letter variable name to match the ReceiverType. 
- ReceiverType string - FieldName string - FieldType string - ZeroValue string - NamedStruct bool // Getter for named struct. -} - -type byName []*getter - -func (b byName) Len() int { return len(b) } -func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal } -func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. - -package {{.Package}} -{{with .Imports}} -import ( - {{- range . -}} - "{{.}}" - {{end -}} -) -{{end}} -{{range .Getters}} -{{if .NamedStruct}} -// Get{{.FieldName}} returns the {{.FieldName}} field. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} { - if {{.ReceiverVar}} == nil { - return {{.ZeroValue}} - } - return {{.ReceiverVar}}.{{.FieldName}} -} -{{else}} -// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} { - if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil { - return {{.ZeroValue}} - } - return *{{.ReceiverVar}}.{{.FieldName}} -} -{{end}} -{{end}} -` diff --git a/vendor/github.com/google/go-github/github/gists.go b/vendor/github.com/google/go-github/github/gists.go deleted file mode 100644 index 15e0bc2cd9dd..000000000000 --- a/vendor/github.com/google/go-github/github/gists.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// GistsService handles communication with the Gist related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/gists/ -type GistsService service - -// Gist represents a GitHub's gist. -type Gist struct { - ID *string `json:"id,omitempty"` - Description *string `json:"description,omitempty"` - Public *bool `json:"public,omitempty"` - Owner *User `json:"owner,omitempty"` - Files map[GistFilename]GistFile `json:"files,omitempty"` - Comments *int `json:"comments,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GitPullURL *string `json:"git_pull_url,omitempty"` - GitPushURL *string `json:"git_push_url,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (g Gist) String() string { - return Stringify(g) -} - -// GistFilename represents filename on a gist. -type GistFilename string - -// GistFile represents a file on a gist. -type GistFile struct { - Size *int `json:"size,omitempty"` - Filename *string `json:"filename,omitempty"` - Language *string `json:"language,omitempty"` - Type *string `json:"type,omitempty"` - RawURL *string `json:"raw_url,omitempty"` - Content *string `json:"content,omitempty"` -} - -func (g GistFile) String() string { - return Stringify(g) -} - -// GistCommit represents a commit on a gist. 
-type GistCommit struct {
- URL *string `json:"url,omitempty"`
- Version *string `json:"version,omitempty"`
- User *User `json:"user,omitempty"`
- ChangeStatus *CommitStats `json:"change_status,omitempty"`
- CommittedAt *Timestamp `json:"committed_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-func (gc GistCommit) String() string {
- return Stringify(gc)
-}
-
-// GistFork represents a fork of a gist.
-type GistFork struct {
- URL *string `json:"url,omitempty"`
- User *User `json:"user,omitempty"`
- ID *string `json:"id,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-func (gf GistFork) String() string {
- return Stringify(gf)
-}
-
-// GistListOptions specifies the optional parameters to the
-// GistsService.List, GistsService.ListAll, and GistsService.ListStarred methods.
-type GistListOptions struct {
- // Since filters Gists by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// List gists for a user. Passing the empty string will list
-// all public gists if called anonymously. However, if the call
-// is authenticated, it will return all gists for the authenticated
-// user.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) List(ctx context.Context, user string, opt *GistListOptions) ([]*Gist, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/gists", user)
- } else {
- u = "gists"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// ListAll lists all public gists.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListAll(ctx context.Context, opt *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/public", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// ListStarred lists starred gists of authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListStarred(ctx context.Context, opt *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/starred", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// Get a single gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#get-a-single-gist
-func (s *GistsService) Get(ctx context.Context, id string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gist := new(Gist)
- resp, err := s.client.Do(ctx, req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, nil
-}
-
-// GetRevision gets a specific revision of a gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#get-a-specific-revision-of-a-gist
-func (s *GistsService) GetRevision(ctx context.Context, id, sha string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v/%v", id, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gist := new(Gist)
- resp, err := s.client.Do(ctx, req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, nil
-}
-
-// Create a gist for authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#create-a-gist
-func (s *GistsService) Create(ctx context.Context, gist *Gist) (*Gist, *Response, error) {
- u := "gists"
- req, err := s.client.NewRequest("POST", u, gist)
- if err != nil {
- return nil, nil, err
- }
-
- g := new(Gist)
- resp, err := s.client.Do(ctx, req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, nil
-}
-
-// Edit a gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#edit-a-gist
-func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("PATCH", u, gist)
- if err != nil {
- return nil, nil, err
- }
-
- g := new(Gist)
- resp, err := s.client.Do(ctx, req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, nil
-}
-
-// ListCommits lists commits of a gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-commits
-func (s *GistsService) ListCommits(ctx context.Context, id string, opt *ListOptions) ([]*GistCommit, *Response, error) {
- u := fmt.Sprintf("gists/%v/commits", id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gistCommits []*GistCommit
- resp, err := s.client.Do(ctx, req, &gistCommits)
- if err != nil {
- return nil, resp, err
- }
-
- return gistCommits, resp, nil
-}
-
-// Delete a gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#delete-a-gist
-func (s *GistsService) Delete(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// Star a gist on behalf of authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#star-a-gist
-func (s *GistsService) Star(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// Unstar a gist on behalf of the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#unstar-a-gist
-func (s *GistsService) Unstar(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// IsStarred checks if a gist is starred by authenticated user. 
-// -// GitHub API docs: https://developer.github.com/v3/gists/#check-if-a-gist-is-starred -func (s *GistsService) IsStarred(ctx context.Context, id string) (bool, *Response, error) { - u := fmt.Sprintf("gists/%v/star", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(ctx, req, nil) - starred, err := parseBoolResponse(err) - return starred, resp, err -} - -// Fork a gist. -// -// GitHub API docs: https://developer.github.com/v3/gists/#fork-a-gist -func (s *GistsService) Fork(ctx context.Context, id string) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v/forks", id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - g := new(Gist) - resp, err := s.client.Do(ctx, req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// ListForks lists forks of a gist. -// -// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-forks -func (s *GistsService) ListForks(ctx context.Context, id string) ([]*GistFork, *Response, error) { - u := fmt.Sprintf("gists/%v/forks", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var gistForks []*GistFork - resp, err := s.client.Do(ctx, req, &gistForks) - if err != nil { - return nil, resp, err - } - - return gistForks, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/gists_comments.go b/vendor/github.com/google/go-github/github/gists_comments.go deleted file mode 100644 index d5322e3d852e..000000000000 --- a/vendor/github.com/google/go-github/github/gists_comments.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// GistComment represents a Gist comment. -type GistComment struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` -} - -func (g GistComment) String() string { - return Stringify(g) -} - -// ListComments lists all comments for a gist. -// -// GitHub API docs: https://developer.github.com/v3/gists/comments/#list-comments-on-a-gist -func (s *GistsService) ListComments(ctx context.Context, gistID string, opt *ListOptions) ([]*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*GistComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment retrieves a single comment from a gist. -// -// GitHub API docs: https://developer.github.com/v3/gists/comments/#get-a-single-comment -func (s *GistsService) GetComment(ctx context.Context, gistID string, commentID int64) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CreateComment creates a comment for a gist. 
-// -// GitHub API docs: https://developer.github.com/v3/gists/comments/#create-a-comment -func (s *GistsService) CreateComment(ctx context.Context, gistID string, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment edits an existing gist comment. -// -// GitHub API docs: https://developer.github.com/v3/gists/comments/#edit-a-comment -func (s *GistsService) EditComment(ctx context.Context, gistID string, commentID int64, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a gist comment. -// -// GitHub API docs: https://developer.github.com/v3/gists/comments/#delete-a-comment -func (s *GistsService) DeleteComment(ctx context.Context, gistID string, commentID int64) (*Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/git.go b/vendor/github.com/google/go-github/github/git.go deleted file mode 100644 index 1ce47437bd31..000000000000 --- a/vendor/github.com/google/go-github/github/git.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// GitService handles communication with the git data related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/git/ -type GitService service diff --git a/vendor/github.com/google/go-github/github/git_blobs.go b/vendor/github.com/google/go-github/github/git_blobs.go deleted file mode 100644 index 70aee14a7ae4..000000000000 --- a/vendor/github.com/google/go-github/github/git_blobs.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" -) - -// Blob represents a blob object. -type Blob struct { - Content *string `json:"content,omitempty"` - Encoding *string `json:"encoding,omitempty"` - SHA *string `json:"sha,omitempty"` - Size *int `json:"size,omitempty"` - URL *string `json:"url,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// GetBlob fetches a blob from a repo given a SHA. -// -// GitHub API docs: https://developer.github.com/v3/git/blobs/#get-a-blob -func (s *GitService) GetBlob(ctx context.Context, owner string, repo string, sha string) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - blob := new(Blob) - resp, err := s.client.Do(ctx, req, blob) - return blob, resp, err -} - -// GetBlobRaw fetches a blob's contents from a repo. 
-// Unlike GetBlob, it returns the raw bytes rather than the base64-encoded data. -// -// GitHub API docs: https://developer.github.com/v3/git/blobs/#get-a-blob -func (s *GitService) GetBlobRaw(ctx context.Context, owner, repo, sha string) ([]byte, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - req.Header.Set("Accept", "application/vnd.github.v3.raw") - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - return buf.Bytes(), resp, err -} - -// CreateBlob creates a blob object. -// -// GitHub API docs: https://developer.github.com/v3/git/blobs/#create-a-blob -func (s *GitService) CreateBlob(ctx context.Context, owner string, repo string, blob *Blob) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo) - req, err := s.client.NewRequest("POST", u, blob) - if err != nil { - return nil, nil, err - } - - t := new(Blob) - resp, err := s.client.Do(ctx, req, t) - return t, resp, err -} diff --git a/vendor/github.com/google/go-github/github/git_commits.go b/vendor/github.com/google/go-github/github/git_commits.go deleted file mode 100644 index 7638acbd69cb..000000000000 --- a/vendor/github.com/google/go-github/github/git_commits.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// SignatureVerification represents GPG signature verification. -type SignatureVerification struct { - Verified *bool `json:"verified,omitempty"` - Reason *string `json:"reason,omitempty"` - Signature *string `json:"signature,omitempty"` - Payload *string `json:"payload,omitempty"` -} - -// Commit represents a GitHub commit. -type Commit struct { - SHA *string `json:"sha,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *Tree `json:"tree,omitempty"` - Parents []Commit `json:"parents,omitempty"` - Stats *CommitStats `json:"stats,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - Verification *SignatureVerification `json:"verification,omitempty"` - NodeID *string `json:"node_id,omitempty"` - - // CommentCount is the number of GitHub comments on the commit. This - // is only populated for requests that fetch GitHub data like - // Pulls.ListCommits, Repositories.ListCommits, etc. - CommentCount *int `json:"comment_count,omitempty"` -} - -func (c Commit) String() string { - return Stringify(c) -} - -// CommitAuthor represents the author or committer of a commit. The commit -// author may not correspond to a GitHub User. -type CommitAuthor struct { - Date *time.Time `json:"date,omitempty"` - Name *string `json:"name,omitempty"` - Email *string `json:"email,omitempty"` - - // The following fields are only populated by Webhook events. - Login *string `json:"username,omitempty"` // Renamed for go-github consistency. -} - -func (c CommitAuthor) String() string { - return Stringify(c) -} - -// GetCommit fetches the Commit object for a given SHA. 
-// -// GitHub API docs: https://developer.github.com/v3/git/commits/#get-a-commit -func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// createCommit represents the body of a CreateCommit request. -type createCommit struct { - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *string `json:"tree,omitempty"` - Parents []string `json:"parents,omitempty"` -} - -// CreateCommit creates a new commit in a repository. -// commit must not be nil. -// -// The commit.Committer is optional and will be filled with the commit.Author -// data if omitted. If the commit.Author is omitted, it will be filled in with -// the authenticated user’s information and the current date. -// -// GitHub API docs: https://developer.github.com/v3/git/commits/#create-a-commit -func (s *GitService) CreateCommit(ctx context.Context, owner string, repo string, commit *Commit) (*Commit, *Response, error) { - if commit == nil { - return nil, nil, fmt.Errorf("commit must be provided") - } - - u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo) - - parents := make([]string, len(commit.Parents)) - for i, parent := range commit.Parents { - parents[i] = *parent.SHA - } - - body := &createCommit{ - Author: commit.Author, - Committer: commit.Committer, - Message: commit.Message, - Parents: parents, - } - if commit.Tree != nil { - body.Tree = commit.Tree.SHA - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/git_refs.go b/vendor/github.com/google/go-github/github/git_refs.go deleted file mode 100644 index 3b2ced2333a8..000000000000 --- a/vendor/github.com/google/go-github/github/git_refs.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" -) - -// Reference represents a GitHub reference. -type Reference struct { - Ref *string `json:"ref"` - URL *string `json:"url"` - Object *GitObject `json:"object"` - NodeID *string `json:"node_id,omitempty"` -} - -func (r Reference) String() string { - return Stringify(r) -} - -// GitObject represents a Git object. -type GitObject struct { - Type *string `json:"type"` - SHA *string `json:"sha"` - URL *string `json:"url"` -} - -func (o GitObject) String() string { - return Stringify(o) -} - -// createRefRequest represents the payload for creating a reference. -type createRefRequest struct { - Ref *string `json:"ref"` - SHA *string `json:"sha"` -} - -// updateRefRequest represents the payload for updating a reference. -type updateRefRequest struct { - SHA *string `json:"sha"` - Force *bool `json:"force"` -} - -// GetRef fetches a single Reference object for a given Git ref. -// If there is no exact match, GetRef will return an error. 
-// -// Note: The GitHub API can return multiple matches. -// If you wish to use this functionality please use the GetRefs() method. -// -// GitHub API docs: https://developer.github.com/v3/git/refs/#get-a-reference -func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref string) (*Reference, *Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if _, ok := err.(*json.UnmarshalTypeError); ok { - // Multiple refs, means there wasn't an exact match. - return nil, resp, errors.New("no exact match found for this ref") - } else if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// GetRefs fetches a slice of Reference objects for a given Git ref. -// If there is an exact match, only that ref is returned. -// If there is no exact match, GitHub returns all refs that start with ref. -// If returned error is nil, there will be at least 1 ref returned. -// For example: -// -// "heads/featureA" -> ["refs/heads/featureA"] // Exact match, single ref is returned. -// "heads/feature" -> ["refs/heads/featureA", "refs/heads/featureB"] // All refs that start with ref. -// "heads/notexist" -> [] // Returns an error. -// -// GitHub API docs: https://developer.github.com/v3/git/refs/#get-a-reference -func (s *GitService) GetRefs(ctx context.Context, owner string, repo string, ref string) ([]*Reference, *Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rawJSON json.RawMessage - resp, err := s.client.Do(ctx, req, &rawJSON) - if err != nil { - return nil, resp, err - } - - // Prioritize the most common case: a single returned ref. - r := new(Reference) - singleUnmarshalError := json.Unmarshal(rawJSON, r) - if singleUnmarshalError == nil { - return []*Reference{r}, resp, nil - } - - // Attempt to unmarshal multiple refs. - var rs []*Reference - multipleUnmarshalError := json.Unmarshal(rawJSON, &rs) - if multipleUnmarshalError == nil { - if len(rs) == 0 { - return nil, resp, fmt.Errorf("unexpected response from GitHub API: an array of refs with length 0") - } - return rs, resp, nil - } - - return nil, resp, fmt.Errorf("unmarshalling failed for both single and multiple refs: %s and %s", singleUnmarshalError, multipleUnmarshalError) -} - -// ReferenceListOptions specifies optional parameters to the -// GitService.ListRefs method. -type ReferenceListOptions struct { - Type string `url:"-"` - - ListOptions -} - -// ListRefs lists all refs in a repository. 
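The difference between GetRef and GetRefs in the removed code is worth illustrating: GetRef insists on an exact match and returns an error when GitHub answers with several refs, while GetRefs returns every ref matching the prefix. A sketch with placeholder repository names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Exact lookup: fails with "no exact match found for this ref" if the
	// prefix is ambiguous.
	ref, _, err := client.Git.GetRef(ctx, "octocat", "Hello-World", "heads/master")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ref.GetRef(), "->", ref.GetObject().GetSHA())

	// Prefix lookup: e.g. "heads/feature" would return every refs/heads/feature* ref.
	refs, _, err := client.Git.GetRefs(ctx, "octocat", "Hello-World", "heads/ma")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range refs {
		fmt.Println(r.GetRef())
	}
}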
-// -// GitHub API docs: https://developer.github.com/v3/git/refs/#get-all-references -func (s *GitService) ListRefs(ctx context.Context, owner, repo string, opt *ReferenceListOptions) ([]*Reference, *Response, error) { - var u string - if opt != nil && opt.Type != "" { - u = fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, opt.Type) - } else { - u = fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rs []*Reference - resp, err := s.client.Do(ctx, req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// CreateRef creates a new ref in a repository. -// -// GitHub API docs: https://developer.github.com/v3/git/refs/#create-a-reference -func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, ref *Reference) (*Reference, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) - req, err := s.client.NewRequest("POST", u, &createRefRequest{ - // back-compat with previous behavior that didn't require 'refs/' prefix - Ref: String("refs/" + strings.TrimPrefix(*ref.Ref, "refs/")), - SHA: ref.Object.SHA, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdateRef updates an existing ref in a repository. -// -// GitHub API docs: https://developer.github.com/v3/git/refs/#update-a-reference -func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) { - refPath := strings.TrimPrefix(*ref.Ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath) - req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{ - SHA: ref.Object.SHA, - Force: &force, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DeleteRef deletes a ref from a repository. -// -// GitHub API docs: https://developer.github.com/v3/git/refs/#delete-a-reference -func (s *GitService) DeleteRef(ctx context.Context, owner string, repo string, ref string) (*Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/git_tags.go b/vendor/github.com/google/go-github/github/git_tags.go deleted file mode 100644 index abdbde6821b0..000000000000 --- a/vendor/github.com/google/go-github/github/git_tags.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Tag represents a tag object. 
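Creating, force-updating and deleting a branch with the ref helpers removed above looks roughly like the following sketch; the SHAs and branch name are placeholders, and all three calls require an authenticated client:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client

	// CreateRef tolerates both "heads/demo" and "refs/heads/demo" thanks to the
	// TrimPrefix in the removed code.
	branch := &github.Reference{
		Ref:    github.String("refs/heads/demo"),
		Object: &github.GitObject{SHA: github.String("aa218f56b14c9653891f9e74264a383fa43fefbd")},
	}
	created, _, err := client.Git.CreateRef(ctx, "octocat", "Hello-World", branch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", created.GetRef())

	// UpdateRef with force=true is the API equivalent of a forced push.
	branch.Object.SHA = github.String("7638417db6d59f3c431d3e1f261cc637155684cd")
	if _, _, err := client.Git.UpdateRef(ctx, "octocat", "Hello-World", branch, true); err != nil {
		log.Fatal(err)
	}

	// DeleteRef removes the branch again.
	if _, err := client.Git.DeleteRef(ctx, "octocat", "Hello-World", "heads/demo"); err != nil {
		log.Fatal(err)
	}
}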
-type Tag struct { - Tag *string `json:"tag,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - Message *string `json:"message,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` - Object *GitObject `json:"object,omitempty"` - Verification *SignatureVerification `json:"verification,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// createTagRequest represents the body of a CreateTag request. This is mostly -// identical to Tag with the exception that the object SHA and Type are -// top-level fields, rather than being nested inside a JSON object. -type createTagRequest struct { - Tag *string `json:"tag,omitempty"` - Message *string `json:"message,omitempty"` - Object *string `json:"object,omitempty"` - Type *string `json:"type,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` -} - -// GetTag fetches a tag from a repo given a SHA. -// -// GitHub API docs: https://developer.github.com/v3/git/tags/#get-a-tag -func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - tag := new(Tag) - resp, err := s.client.Do(ctx, req, tag) - return tag, resp, err -} - -// CreateTag creates a tag object. -// -// GitHub API docs: https://developer.github.com/v3/git/tags/#create-a-tag-object -func (s *GitService) CreateTag(ctx context.Context, owner string, repo string, tag *Tag) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo) - - // convert Tag into a createTagRequest - tagRequest := &createTagRequest{ - Tag: tag.Tag, - Message: tag.Message, - Tagger: tag.Tagger, - } - if tag.Object != nil { - tagRequest.Object = tag.Object.SHA - tagRequest.Type = tag.Object.Type - } - - req, err := s.client.NewRequest("POST", u, tagRequest) - if err != nil { - return nil, nil, err - } - - t := new(Tag) - resp, err := s.client.Do(ctx, req, t) - return t, resp, err -} diff --git a/vendor/github.com/google/go-github/github/git_trees.go b/vendor/github.com/google/go-github/github/git_trees.go deleted file mode 100644 index 4bc2913541ad..000000000000 --- a/vendor/github.com/google/go-github/github/git_trees.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Tree represents a GitHub tree. -type Tree struct { - SHA *string `json:"sha,omitempty"` - Entries []TreeEntry `json:"tree,omitempty"` - - // Truncated is true if the number of items in the tree - // exceeded GitHub's maximum limit and the Entries were truncated - // in the response. Only populated for requests that fetch - // trees like Git.GetTree. - Truncated *bool `json:"truncated,omitempty"` -} - -func (t Tree) String() string { - return Stringify(t) -} - -// TreeEntry represents the contents of a tree structure. TreeEntry can -// represent either a blob, a commit (in the case of a submodule), or another -// tree. 
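Annotated tags via the API deleted here are a two-step affair: CreateTag only creates the tag object, and a refs/tags/... reference must then be created to point at it (see the ref sketch above). A sketch with placeholder values:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client

	now := time.Now()
	tag := &github.Tag{
		Tag:     github.String("v0.0.1"),
		Message: github.String("illustrative annotated tag"),
		Tagger: &github.CommitAuthor{
			Name:  github.String("Octo Cat"),
			Email: github.String("octocat@example.com"),
			Date:  &now,
		},
		// Object points at the commit being tagged (placeholder SHA); the removed
		// createTagRequest lifts its SHA and Type to top-level fields.
		Object: &github.GitObject{
			Type: github.String("commit"),
			SHA:  github.String("7638417db6d59f3c431d3e1f261cc637155684cd"),
		},
	}

	created, _, err := client.Git.CreateTag(ctx, "octocat", "Hello-World", tag)
	if err != nil {
		log.Fatal(err)
	}
	// The tag object is not reachable until a refs/tags/v0.0.1 ref is created
	// for it with Git.CreateRef.
	fmt.Println("tag object SHA:", created.GetSHA())
}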
-type TreeEntry struct { - SHA *string `json:"sha,omitempty"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - Size *int `json:"size,omitempty"` - Content *string `json:"content,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (t TreeEntry) String() string { - return Stringify(t) -} - -// GetTree fetches the Tree object for a given sha hash from a repository. -// -// GitHub API docs: https://developer.github.com/v3/git/trees/#get-a-tree -func (s *GitService) GetTree(ctx context.Context, owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha) - if recursive { - u += "?recursive=1" - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// createTree represents the body of a CreateTree request. -type createTree struct { - BaseTree string `json:"base_tree,omitempty"` - Entries []TreeEntry `json:"tree"` -} - -// CreateTree creates a new tree in a repository. If both a tree and a nested -// path modifying that tree are specified, it will overwrite the contents of -// that tree with the new path contents and write a new tree out. -// -// GitHub API docs: https://developer.github.com/v3/git/trees/#create-a-tree -func (s *GitService) CreateTree(ctx context.Context, owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo) - - body := &createTree{ - BaseTree: baseTree, - Entries: entries, - } - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go deleted file mode 100644 index ac4257ea3809..000000000000 --- a/vendor/github.com/google/go-github/github/github-accessors.go +++ /dev/null @@ -1,12293 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. - -package github - -import ( - "encoding/json" - "time" -) - -// GetRetryAfter returns the RetryAfter field if it's non-nil, zero value otherwise. -func (a *AbuseRateLimitError) GetRetryAfter() time.Duration { - if a == nil || a.RetryAfter == nil { - return 0 - } - return *a.RetryAfter -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *AdminEnforcement) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetComments returns the Comments field. -func (a *AdminStats) GetComments() *CommentStats { - if a == nil { - return nil - } - return a.Comments -} - -// GetGists returns the Gists field. -func (a *AdminStats) GetGists() *GistStats { - if a == nil { - return nil - } - return a.Gists -} - -// GetHooks returns the Hooks field. -func (a *AdminStats) GetHooks() *HookStats { - if a == nil { - return nil - } - return a.Hooks -} - -// GetIssues returns the Issues field. 
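The tree helpers removed above are usually combined: fetch a base tree, then layer new entries on top of it with CreateTree. A sketch with placeholder SHAs and paths:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client

	// Fetch the tree of an existing commit, recursively; the SHA is a placeholder.
	base, _, err := client.Git.GetTree(ctx, "octocat", "Hello-World", "b4eecafa9be2f2006ce1b709d6857b07069b4608", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("base tree has %d entries (truncated: %v)\n", len(base.Entries), base.GetTruncated())

	// Layer one new file on top of the base tree. Mode "100644" is a regular file.
	entries := []github.TreeEntry{{
		Path:    github.String("docs/NOTE.md"),
		Mode:    github.String("100644"),
		Type:    github.String("blob"),
		Content: github.String("added via the git data API\n"),
	}}
	tree, _, err := client.Git.CreateTree(ctx, "octocat", "Hello-World", base.GetSHA(), entries)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new tree SHA:", tree.GetSHA())
}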
-func (a *AdminStats) GetIssues() *IssueStats { - if a == nil { - return nil - } - return a.Issues -} - -// GetMilestones returns the Milestones field. -func (a *AdminStats) GetMilestones() *MilestoneStats { - if a == nil { - return nil - } - return a.Milestones -} - -// GetOrgs returns the Orgs field. -func (a *AdminStats) GetOrgs() *OrgStats { - if a == nil { - return nil - } - return a.Orgs -} - -// GetPages returns the Pages field. -func (a *AdminStats) GetPages() *PageStats { - if a == nil { - return nil - } - return a.Pages -} - -// GetPulls returns the Pulls field. -func (a *AdminStats) GetPulls() *PullStats { - if a == nil { - return nil - } - return a.Pulls -} - -// GetRepos returns the Repos field. -func (a *AdminStats) GetRepos() *RepoStats { - if a == nil { - return nil - } - return a.Repos -} - -// GetUsers returns the Users field. -func (a *AdminStats) GetUsers() *UserStats { - if a == nil { - return nil - } - return a.Users -} - -// GetVerifiablePasswordAuthentication returns the VerifiablePasswordAuthentication field if it's non-nil, zero value otherwise. -func (a *APIMeta) GetVerifiablePasswordAuthentication() bool { - if a == nil || a.VerifiablePasswordAuthentication == nil { - return false - } - return *a.VerifiablePasswordAuthentication -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *App) GetCreatedAt() time.Time { - if a == nil || a.CreatedAt == nil { - return time.Time{} - } - return *a.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (a *App) GetDescription() string { - if a == nil || a.Description == nil { - return "" - } - return *a.Description -} - -// GetExternalURL returns the ExternalURL field if it's non-nil, zero value otherwise. -func (a *App) GetExternalURL() string { - if a == nil || a.ExternalURL == nil { - return "" - } - return *a.ExternalURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (a *App) GetHTMLURL() string { - if a == nil || a.HTMLURL == nil { - return "" - } - return *a.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *App) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *App) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (a *App) GetNodeID() string { - if a == nil || a.NodeID == nil { - return "" - } - return *a.NodeID -} - -// GetOwner returns the Owner field. -func (a *App) GetOwner() *User { - if a == nil { - return nil - } - return a.Owner -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *App) GetUpdatedAt() time.Time { - if a == nil || a.UpdatedAt == nil { - return time.Time{} - } - return *a.UpdatedAt -} - -// GetApp returns the App field. -func (a *Authorization) GetApp() *AuthorizationApp { - if a == nil { - return nil - } - return a.App -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *Authorization) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. 
-func (a *Authorization) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise. -func (a *Authorization) GetHashedToken() string { - if a == nil || a.HashedToken == nil { - return "" - } - return *a.HashedToken -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *Authorization) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *Authorization) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (a *Authorization) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. -func (a *Authorization) GetToken() string { - if a == nil || a.Token == nil { - return "" - } - return *a.Token -} - -// GetTokenLastEight returns the TokenLastEight field if it's non-nil, zero value otherwise. -func (a *Authorization) GetTokenLastEight() string { - if a == nil || a.TokenLastEight == nil { - return "" - } - return *a.TokenLastEight -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *Authorization) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *Authorization) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetUser returns the User field. -func (a *Authorization) GetUser() *User { - if a == nil { - return nil - } - return a.User -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetClientID() string { - if a == nil || a.ClientID == nil { - return "" - } - return *a.ClientID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetClientID() string { - if a == nil || a.ClientID == nil { - return "" - } - return *a.ClientID -} - -// GetClientSecret returns the ClientSecret field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetClientSecret() string { - if a == nil || a.ClientSecret == nil { - return "" - } - return *a.ClientSecret -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. 
-func (a *AuthorizationRequest) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (a *AutoTriggerCheck) GetAppID() int64 { - if a == nil || a.AppID == nil { - return 0 - } - return *a.AppID -} - -// GetSetting returns the Setting field if it's non-nil, zero value otherwise. -func (a *AutoTriggerCheck) GetSetting() bool { - if a == nil || a.Setting == nil { - return false - } - return *a.Setting -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (b *Blob) GetContent() string { - if b == nil || b.Content == nil { - return "" - } - return *b.Content -} - -// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise. -func (b *Blob) GetEncoding() string { - if b == nil || b.Encoding == nil { - return "" - } - return *b.Encoding -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (b *Blob) GetNodeID() string { - if b == nil || b.NodeID == nil { - return "" - } - return *b.NodeID -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (b *Blob) GetSHA() string { - if b == nil || b.SHA == nil { - return "" - } - return *b.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (b *Blob) GetSize() int { - if b == nil || b.Size == nil { - return 0 - } - return *b.Size -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (b *Blob) GetURL() string { - if b == nil || b.URL == nil { - return "" - } - return *b.URL -} - -// GetCommit returns the Commit field. -func (b *Branch) GetCommit() *RepositoryCommit { - if b == nil { - return nil - } - return b.Commit -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (b *Branch) GetName() string { - if b == nil || b.Name == nil { - return "" - } - return *b.Name -} - -// GetProtected returns the Protected field if it's non-nil, zero value otherwise. -func (b *Branch) GetProtected() bool { - if b == nil || b.Protected == nil { - return false - } - return *b.Protected -} - -// GetApp returns the App field. -func (c *CheckRun) GetApp() *App { - if c == nil { - return nil - } - return c.App -} - -// GetCheckSuite returns the CheckSuite field. -func (c *CheckRun) GetCheckSuite() *CheckSuite { - if c == nil { - return nil - } - return c.CheckSuite -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetCompletedAt() Timestamp { - if c == nil || c.CompletedAt == nil { - return Timestamp{} - } - return *c.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. 
-func (c *CheckRun) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetDetailsURL() string { - if c == nil || c.DetailsURL == nil { - return "" - } - return *c.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetExternalID() string { - if c == nil || c.ExternalID == nil { - return "" - } - return *c.ExternalID -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetHeadSHA() string { - if c == nil || c.HeadSHA == nil { - return "" - } - return *c.HeadSHA -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetOutput returns the Output field. -func (c *CheckRun) GetOutput() *CheckRunOutput { - if c == nil { - return nil - } - return c.Output -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetStartedAt() Timestamp { - if c == nil || c.StartedAt == nil { - return Timestamp{} - } - return *c.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAnnotationLevel returns the AnnotationLevel field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetAnnotationLevel() string { - if c == nil || c.AnnotationLevel == nil { - return "" - } - return *c.AnnotationLevel -} - -// GetBlobHRef returns the BlobHRef field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetBlobHRef() string { - if c == nil || c.BlobHRef == nil { - return "" - } - return *c.BlobHRef -} - -// GetEndLine returns the EndLine field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetEndLine() int { - if c == nil || c.EndLine == nil { - return 0 - } - return *c.EndLine -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetMessage() string { - if c == nil || c.Message == nil { - return "" - } - return *c.Message -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetPath() string { - if c == nil || c.Path == nil { - return "" - } - return *c.Path -} - -// GetRawDetails returns the RawDetails field if it's non-nil, zero value otherwise. 
-func (c *CheckRunAnnotation) GetRawDetails() string { - if c == nil || c.RawDetails == nil { - return "" - } - return *c.RawDetails -} - -// GetStartLine returns the StartLine field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetStartLine() int { - if c == nil || c.StartLine == nil { - return 0 - } - return *c.StartLine -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetTitle() string { - if c == nil || c.Title == nil { - return "" - } - return *c.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CheckRunEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetCheckRun returns the CheckRun field. -func (c *CheckRunEvent) GetCheckRun() *CheckRun { - if c == nil { - return nil - } - return c.CheckRun -} - -// GetInstallation returns the Installation field. -func (c *CheckRunEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetOrg returns the Org field. -func (c *CheckRunEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetRepo returns the Repo field. -func (c *CheckRunEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetRequestedAction returns the RequestedAction field. -func (c *CheckRunEvent) GetRequestedAction() *RequestedAction { - if c == nil { - return nil - } - return c.RequestedAction -} - -// GetSender returns the Sender field. -func (c *CheckRunEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetAlt returns the Alt field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetAlt() string { - if c == nil || c.Alt == nil { - return "" - } - return *c.Alt -} - -// GetCaption returns the Caption field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetCaption() string { - if c == nil || c.Caption == nil { - return "" - } - return *c.Caption -} - -// GetImageURL returns the ImageURL field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetImageURL() string { - if c == nil || c.ImageURL == nil { - return "" - } - return *c.ImageURL -} - -// GetAnnotationsCount returns the AnnotationsCount field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetAnnotationsCount() int { - if c == nil || c.AnnotationsCount == nil { - return 0 - } - return *c.AnnotationsCount -} - -// GetAnnotationsURL returns the AnnotationsURL field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetAnnotationsURL() string { - if c == nil || c.AnnotationsURL == nil { - return "" - } - return *c.AnnotationsURL -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetSummary() string { - if c == nil || c.Summary == nil { - return "" - } - return *c.Summary -} - -// GetText returns the Text field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetText() string { - if c == nil || c.Text == nil { - return "" - } - return *c.Text -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetTitle() string { - if c == nil || c.Title == nil { - return "" - } - return *c.Title -} - -// GetAfterSHA returns the AfterSHA field if it's non-nil, zero value otherwise. 
-func (c *CheckSuite) GetAfterSHA() string { - if c == nil || c.AfterSHA == nil { - return "" - } - return *c.AfterSHA -} - -// GetApp returns the App field. -func (c *CheckSuite) GetApp() *App { - if c == nil { - return nil - } - return c.App -} - -// GetBeforeSHA returns the BeforeSHA field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetBeforeSHA() string { - if c == nil || c.BeforeSHA == nil { - return "" - } - return *c.BeforeSHA -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetHeadBranch() string { - if c == nil || c.HeadBranch == nil { - return "" - } - return *c.HeadBranch -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetHeadSHA() string { - if c == nil || c.HeadSHA == nil { - return "" - } - return *c.HeadSHA -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetRepository returns the Repository field. -func (c *CheckSuite) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CheckSuiteEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetCheckSuite returns the CheckSuite field. -func (c *CheckSuiteEvent) GetCheckSuite() *CheckSuite { - if c == nil { - return nil - } - return c.CheckSuite -} - -// GetInstallation returns the Installation field. -func (c *CheckSuiteEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetOrg returns the Org field. -func (c *CheckSuiteEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetRepo returns the Repo field. -func (c *CheckSuiteEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CheckSuiteEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetPreferenceList returns the PreferenceList field. -func (c *CheckSuitePreferenceOptions) GetPreferenceList() *PreferenceList { - if c == nil { - return nil - } - return c.PreferenceList -} - -// GetPreferences returns the Preferences field. -func (c *CheckSuitePreferenceResults) GetPreferences() *PreferenceList { - if c == nil { - return nil - } - return c.Preferences -} - -// GetRepository returns the Repository field. 
-func (c *CheckSuitePreferenceResults) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetBody() string { - if c == nil || c.Body == nil { - return "" - } - return *c.Body -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetKey() string { - if c == nil || c.Key == nil { - return "" - } - return *c.Key -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetPath() string { - if c == nil || c.Path == nil { - return "" - } - return *c.Path -} - -// GetRepository returns the Repository field. -func (c *CodeResult) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (c *CodeSearchResult) GetIncompleteResults() bool { - if c == nil || c.IncompleteResults == nil { - return false - } - return *c.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CodeSearchResult) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetCommitURL() string { - if c == nil || c.CommitURL == nil { - return "" - } - return *c.CommitURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetRepositoryURL() string { - if c == nil || c.RepositoryURL == nil { - return "" - } - return *c.RepositoryURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetState() string { - if c == nil || c.State == nil { - return "" - } - return *c.State -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. 
-func (c *CombinedStatus) GetTotalCount() int { - if c == nil || c.TotalCount == nil { - return 0 - } - return *c.TotalCount -} - -// GetTotalCommitComments returns the TotalCommitComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalCommitComments() int { - if c == nil || c.TotalCommitComments == nil { - return 0 - } - return *c.TotalCommitComments -} - -// GetTotalGistComments returns the TotalGistComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalGistComments() int { - if c == nil || c.TotalGistComments == nil { - return 0 - } - return *c.TotalGistComments -} - -// GetTotalIssueComments returns the TotalIssueComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalIssueComments() int { - if c == nil || c.TotalIssueComments == nil { - return 0 - } - return *c.TotalIssueComments -} - -// GetTotalPullRequestComments returns the TotalPullRequestComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalPullRequestComments() int { - if c == nil || c.TotalPullRequestComments == nil { - return 0 - } - return *c.TotalPullRequestComments -} - -// GetAuthor returns the Author field. -func (c *Commit) GetAuthor() *CommitAuthor { - if c == nil { - return nil - } - return c.Author -} - -// GetCommentCount returns the CommentCount field if it's non-nil, zero value otherwise. -func (c *Commit) GetCommentCount() int { - if c == nil || c.CommentCount == nil { - return 0 - } - return *c.CommentCount -} - -// GetCommitter returns the Committer field. -func (c *Commit) GetCommitter() *CommitAuthor { - if c == nil { - return nil - } - return c.Committer -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *Commit) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (c *Commit) GetMessage() string { - if c == nil || c.Message == nil { - return "" - } - return *c.Message -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *Commit) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *Commit) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetStats returns the Stats field. -func (c *Commit) GetStats() *CommitStats { - if c == nil { - return nil - } - return c.Stats -} - -// GetTree returns the Tree field. -func (c *Commit) GetTree() *Tree { - if c == nil { - return nil - } - return c.Tree -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *Commit) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetVerification returns the Verification field. -func (c *Commit) GetVerification() *SignatureVerification { - if c == nil { - return nil - } - return c.Verification -} - -// GetDate returns the Date field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetDate() time.Time { - if c == nil || c.Date == nil { - return time.Time{} - } - return *c.Date -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetEmail() string { - if c == nil || c.Email == nil { - return "" - } - return *c.Email -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. 
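All of the generated Get* accessors in this deleted file follow one pattern: they return the zero value when either the receiver or the pointer field is nil, so optional JSON fields can be read without explicit nil checks. A small sketch using the Commit and CommitAuthor getters from this hunk:

package main

import (
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	// Every struct field in go-github is a pointer so "absent" and "zero" can be
	// distinguished after JSON decoding; the generated accessors hide that.
	var c *github.Commit // nil on purpose

	// Safe even though c is nil: GetMessage returns "" and GetAuthor returns nil,
	// and the chained GetName on the nil *CommitAuthor also just returns "".
	fmt.Printf("message=%q author=%q\n", c.GetMessage(), c.GetAuthor().GetName())

	c = &github.Commit{
		Message: github.String("fix: something"),
		Author:  &github.CommitAuthor{Name: github.String("Octo Cat")},
	}
	fmt.Printf("message=%q author=%q\n", c.GetMessage(), c.GetAuthor().GetName())
}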
-func (c *CommitAuthor) GetLogin() string { - if c == nil || c.Login == nil { - return "" - } - return *c.Login -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CommitCommentEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetComment returns the Comment field. -func (c *CommitCommentEvent) GetComment() *RepositoryComment { - if c == nil { - return nil - } - return c.Comment -} - -// GetInstallation returns the Installation field. -func (c *CommitCommentEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetRepo returns the Repo field. -func (c *CommitCommentEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CommitCommentEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetAdditions() int { - if c == nil || c.Additions == nil { - return 0 - } - return *c.Additions -} - -// GetBlobURL returns the BlobURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetBlobURL() string { - if c == nil || c.BlobURL == nil { - return "" - } - return *c.BlobURL -} - -// GetChanges returns the Changes field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetChanges() int { - if c == nil || c.Changes == nil { - return 0 - } - return *c.Changes -} - -// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetContentsURL() string { - if c == nil || c.ContentsURL == nil { - return "" - } - return *c.ContentsURL -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetDeletions() int { - if c == nil || c.Deletions == nil { - return 0 - } - return *c.Deletions -} - -// GetFilename returns the Filename field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetFilename() string { - if c == nil || c.Filename == nil { - return "" - } - return *c.Filename -} - -// GetPatch returns the Patch field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetPatch() string { - if c == nil || c.Patch == nil { - return "" - } - return *c.Patch -} - -// GetPreviousFilename returns the PreviousFilename field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetPreviousFilename() string { - if c == nil || c.PreviousFilename == nil { - return "" - } - return *c.PreviousFilename -} - -// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetRawURL() string { - if c == nil || c.RawURL == nil { - return "" - } - return *c.RawURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetAuthor returns the Author field. 
-func (c *CommitResult) GetAuthor() *User { - if c == nil { - return nil - } - return c.Author -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetCommentsURL() string { - if c == nil || c.CommentsURL == nil { - return "" - } - return *c.CommentsURL -} - -// GetCommit returns the Commit field. -func (c *CommitResult) GetCommit() *Commit { - if c == nil { - return nil - } - return c.Commit -} - -// GetCommitter returns the Committer field. -func (c *CommitResult) GetCommitter() *User { - if c == nil { - return nil - } - return c.Committer -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetRepository returns the Repository field. -func (c *CommitResult) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetScore returns the Score field. -func (c *CommitResult) GetScore() *float64 { - if c == nil { - return nil - } - return c.Score -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAheadBy returns the AheadBy field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetAheadBy() int { - if c == nil || c.AheadBy == nil { - return 0 - } - return *c.AheadBy -} - -// GetBaseCommit returns the BaseCommit field. -func (c *CommitsComparison) GetBaseCommit() *RepositoryCommit { - if c == nil { - return nil - } - return c.BaseCommit -} - -// GetBehindBy returns the BehindBy field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetBehindBy() int { - if c == nil || c.BehindBy == nil { - return 0 - } - return *c.BehindBy -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetDiffURL() string { - if c == nil || c.DiffURL == nil { - return "" - } - return *c.DiffURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetMergeBaseCommit returns the MergeBaseCommit field. -func (c *CommitsComparison) GetMergeBaseCommit() *RepositoryCommit { - if c == nil { - return nil - } - return c.MergeBaseCommit -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetPatchURL() string { - if c == nil || c.PatchURL == nil { - return "" - } - return *c.PatchURL -} - -// GetPermalinkURL returns the PermalinkURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetPermalinkURL() string { - if c == nil || c.PermalinkURL == nil { - return "" - } - return *c.PermalinkURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetTotalCommits returns the TotalCommits field if it's non-nil, zero value otherwise. 
-func (c *CommitsComparison) GetTotalCommits() int { - if c == nil || c.TotalCommits == nil { - return 0 - } - return *c.TotalCommits -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (c *CommitsSearchResult) GetIncompleteResults() bool { - if c == nil || c.IncompleteResults == nil { - return false - } - return *c.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CommitsSearchResult) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetAdditions() int { - if c == nil || c.Additions == nil { - return 0 - } - return *c.Additions -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetDeletions() int { - if c == nil || c.Deletions == nil { - return 0 - } - return *c.Deletions -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetCodeOfConduct returns the CodeOfConduct field. -func (c *CommunityHealthFiles) GetCodeOfConduct() *Metric { - if c == nil { - return nil - } - return c.CodeOfConduct -} - -// GetContributing returns the Contributing field. -func (c *CommunityHealthFiles) GetContributing() *Metric { - if c == nil { - return nil - } - return c.Contributing -} - -// GetIssueTemplate returns the IssueTemplate field. -func (c *CommunityHealthFiles) GetIssueTemplate() *Metric { - if c == nil { - return nil - } - return c.IssueTemplate -} - -// GetLicense returns the License field. -func (c *CommunityHealthFiles) GetLicense() *Metric { - if c == nil { - return nil - } - return c.License -} - -// GetPullRequestTemplate returns the PullRequestTemplate field. -func (c *CommunityHealthFiles) GetPullRequestTemplate() *Metric { - if c == nil { - return nil - } - return c.PullRequestTemplate -} - -// GetReadme returns the Readme field. -func (c *CommunityHealthFiles) GetReadme() *Metric { - if c == nil { - return nil - } - return c.Readme -} - -// GetFiles returns the Files field. -func (c *CommunityHealthMetrics) GetFiles() *CommunityHealthFiles { - if c == nil { - return nil - } - return c.Files -} - -// GetHealthPercentage returns the HealthPercentage field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetHealthPercentage() int { - if c == nil || c.HealthPercentage == nil { - return 0 - } - return *c.HealthPercentage -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetUpdatedAt() time.Time { - if c == nil || c.UpdatedAt == nil { - return time.Time{} - } - return *c.UpdatedAt -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetAvatarURL() string { - if c == nil || c.AvatarURL == nil { - return "" - } - return *c.AvatarURL -} - -// GetContributions returns the Contributions field if it's non-nil, zero value otherwise. 
-func (c *Contributor) GetContributions() int { - if c == nil || c.Contributions == nil { - return 0 - } - return *c.Contributions -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetEventsURL() string { - if c == nil || c.EventsURL == nil { - return "" - } - return *c.EventsURL -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetFollowersURL() string { - if c == nil || c.FollowersURL == nil { - return "" - } - return *c.FollowersURL -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetFollowingURL() string { - if c == nil || c.FollowingURL == nil { - return "" - } - return *c.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetGistsURL() string { - if c == nil || c.GistsURL == nil { - return "" - } - return *c.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. -func (c *Contributor) GetGravatarID() string { - if c == nil || c.GravatarID == nil { - return "" - } - return *c.GravatarID -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *Contributor) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (c *Contributor) GetLogin() string { - if c == nil || c.Login == nil { - return "" - } - return *c.Login -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetOrganizationsURL() string { - if c == nil || c.OrganizationsURL == nil { - return "" - } - return *c.OrganizationsURL -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetReceivedEventsURL() string { - if c == nil || c.ReceivedEventsURL == nil { - return "" - } - return *c.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetReposURL() string { - if c == nil || c.ReposURL == nil { - return "" - } - return *c.ReposURL -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (c *Contributor) GetSiteAdmin() bool { - if c == nil || c.SiteAdmin == nil { - return false - } - return *c.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetStarredURL() string { - if c == nil || c.StarredURL == nil { - return "" - } - return *c.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetSubscriptionsURL() string { - if c == nil || c.SubscriptionsURL == nil { - return "" - } - return *c.SubscriptionsURL -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (c *Contributor) GetType() string { - if c == nil || c.Type == nil { - return "" - } - return *c.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (c *Contributor) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAuthor returns the Author field. -func (c *ContributorStats) GetAuthor() *Contributor { - if c == nil { - return nil - } - return c.Author -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *ContributorStats) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetCompletedAt() Timestamp { - if c == nil || c.CompletedAt == nil { - return Timestamp{} - } - return *c.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetDetailsURL() string { - if c == nil || c.DetailsURL == nil { - return "" - } - return *c.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetExternalID() string { - if c == nil || c.ExternalID == nil { - return "" - } - return *c.ExternalID -} - -// GetOutput returns the Output field. -func (c *CreateCheckRunOptions) GetOutput() *CheckRunOutput { - if c == nil { - return nil - } - return c.Output -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetStartedAt() Timestamp { - if c == nil || c.StartedAt == nil { - return Timestamp{} - } - return *c.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (c *CreateCheckSuiteOptions) GetHeadBranch() string { - if c == nil || c.HeadBranch == nil { - return "" - } - return *c.HeadBranch -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetDescription() string { - if c == nil || c.Description == nil { - return "" - } - return *c.Description -} - -// GetInstallation returns the Installation field. -func (c *CreateEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetMasterBranch() string { - if c == nil || c.MasterBranch == nil { - return "" - } - return *c.MasterBranch -} - -// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetPusherType() string { - if c == nil || c.PusherType == nil { - return "" - } - return *c.PusherType -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetRef() string { - if c == nil || c.Ref == nil { - return "" - } - return *c.Ref -} - -// GetRefType returns the RefType field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetRefType() string { - if c == nil || c.RefType == nil { - return "" - } - return *c.RefType -} - -// GetRepo returns the Repo field. 
-func (c *CreateEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CreateEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (c *CreateOrgInvitationOptions) GetEmail() string { - if c == nil || c.Email == nil { - return "" - } - return *c.Email -} - -// GetInviteeID returns the InviteeID field if it's non-nil, zero value otherwise. -func (c *CreateOrgInvitationOptions) GetInviteeID() int64 { - if c == nil || c.InviteeID == nil { - return 0 - } - return *c.InviteeID -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (c *CreateOrgInvitationOptions) GetRole() string { - if c == nil || c.Role == nil { - return "" - } - return *c.Role -} - -// GetInstallation returns the Installation field. -func (d *DeleteEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetPusherType() string { - if d == nil || d.PusherType == nil { - return "" - } - return *d.PusherType -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRefType returns the RefType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetRefType() string { - if d == nil || d.RefType == nil { - return "" - } - return *d.RefType -} - -// GetRepo returns the Repo field. -func (d *DeleteEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeleteEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *Deployment) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetCreator returns the Creator field. -func (d *Deployment) GetCreator() *User { - if d == nil { - return nil - } - return d.Creator -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *Deployment) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *Deployment) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *Deployment) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *Deployment) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *Deployment) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. 
-func (d *Deployment) GetRepositoryURL() string { - if d == nil || d.RepositoryURL == nil { - return "" - } - return *d.RepositoryURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (d *Deployment) GetSHA() string { - if d == nil || d.SHA == nil { - return "" - } - return *d.SHA -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (d *Deployment) GetStatusesURL() string { - if d == nil || d.StatusesURL == nil { - return "" - } - return *d.StatusesURL -} - -// GetTask returns the Task field if it's non-nil, zero value otherwise. -func (d *Deployment) GetTask() string { - if d == nil || d.Task == nil { - return "" - } - return *d.Task -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *Deployment) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *Deployment) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetDeployment returns the Deployment field. -func (d *DeploymentEvent) GetDeployment() *Deployment { - if d == nil { - return nil - } - return d.Deployment -} - -// GetInstallation returns the Installation field. -func (d *DeploymentEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetRepo returns the Repo field. -func (d *DeploymentEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeploymentEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAutoMerge returns the AutoMerge field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetAutoMerge() bool { - if d == nil || d.AutoMerge == nil { - return false - } - return *d.AutoMerge -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetPayload returns the Payload field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetPayload() string { - if d == nil || d.Payload == nil { - return "" - } - return *d.Payload -} - -// GetProductionEnvironment returns the ProductionEnvironment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetProductionEnvironment() bool { - if d == nil || d.ProductionEnvironment == nil { - return false - } - return *d.ProductionEnvironment -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRequiredContexts returns the RequiredContexts field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetRequiredContexts() []string { - if d == nil || d.RequiredContexts == nil { - return nil - } - return *d.RequiredContexts -} - -// GetTask returns the Task field if it's non-nil, zero value otherwise. 
-func (d *DeploymentRequest) GetTask() string { - if d == nil || d.Task == nil { - return "" - } - return *d.Task -} - -// GetTransientEnvironment returns the TransientEnvironment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetTransientEnvironment() bool { - if d == nil || d.TransientEnvironment == nil { - return false - } - return *d.TransientEnvironment -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetCreator returns the Creator field. -func (d *DeploymentStatus) GetCreator() *User { - if d == nil { - return nil - } - return d.Creator -} - -// GetDeploymentURL returns the DeploymentURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetDeploymentURL() string { - if d == nil || d.DeploymentURL == nil { - return "" - } - return *d.DeploymentURL -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetRepositoryURL() string { - if d == nil || d.RepositoryURL == nil { - return "" - } - return *d.RepositoryURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetTargetURL() string { - if d == nil || d.TargetURL == nil { - return "" - } - return *d.TargetURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetDeployment returns the Deployment field. -func (d *DeploymentStatusEvent) GetDeployment() *Deployment { - if d == nil { - return nil - } - return d.Deployment -} - -// GetDeploymentStatus returns the DeploymentStatus field. -func (d *DeploymentStatusEvent) GetDeploymentStatus() *DeploymentStatus { - if d == nil { - return nil - } - return d.DeploymentStatus -} - -// GetInstallation returns the Installation field. -func (d *DeploymentStatusEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetRepo returns the Repo field. -func (d *DeploymentStatusEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeploymentStatusEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAutoInactive returns the AutoInactive field if it's non-nil, zero value otherwise. 
-func (d *DeploymentStatusRequest) GetAutoInactive() bool { - if d == nil || d.AutoInactive == nil { - return false - } - return *d.AutoInactive -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetEnvironmentURL returns the EnvironmentURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetEnvironmentURL() string { - if d == nil || d.EnvironmentURL == nil { - return "" - } - return *d.EnvironmentURL -} - -// GetLogURL returns the LogURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetLogURL() string { - if d == nil || d.LogURL == nil { - return "" - } - return *d.LogURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetAuthor returns the Author field. -func (d *DiscussionComment) GetAuthor() *User { - if d == nil { - return nil - } - return d.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBody() string { - if d == nil || d.Body == nil { - return "" - } - return *d.Body -} - -// GetBodyHTML returns the BodyHTML field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBodyHTML() string { - if d == nil || d.BodyHTML == nil { - return "" - } - return *d.BodyHTML -} - -// GetBodyVersion returns the BodyVersion field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBodyVersion() string { - if d == nil || d.BodyVersion == nil { - return "" - } - return *d.BodyVersion -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetDiscussionURL returns the DiscussionURL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetDiscussionURL() string { - if d == nil || d.DiscussionURL == nil { - return "" - } - return *d.DiscussionURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetHTMLURL() string { - if d == nil || d.HTMLURL == nil { - return "" - } - return *d.HTMLURL -} - -// GetLastEditedAt returns the LastEditedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetLastEditedAt() Timestamp { - if d == nil || d.LastEditedAt == nil { - return Timestamp{} - } - return *d.LastEditedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetNumber() int { - if d == nil || d.Number == nil { - return 0 - } - return *d.Number -} - -// GetReactions returns the Reactions field. 
-func (d *DiscussionComment) GetReactions() *Reactions { - if d == nil { - return nil - } - return d.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetTeams returns the Teams field if it's non-nil, zero value otherwise. -func (d *DismissalRestrictionsRequest) GetTeams() []string { - if d == nil || d.Teams == nil { - return nil - } - return *d.Teams -} - -// GetUsers returns the Users field if it's non-nil, zero value otherwise. -func (d *DismissalRestrictionsRequest) GetUsers() []string { - if d == nil || d.Users == nil { - return nil - } - return *d.Users -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetBody() string { - if d == nil || d.Body == nil { - return "" - } - return *d.Body -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetPath() string { - if d == nil || d.Path == nil { - return "" - } - return *d.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetPosition() int { - if d == nil || d.Position == nil { - return 0 - } - return *d.Position -} - -// GetActor returns the Actor field. -func (e *Event) GetActor() *User { - if e == nil { - return nil - } - return e.Actor -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *Event) GetCreatedAt() time.Time { - if e == nil || e.CreatedAt == nil { - return time.Time{} - } - return *e.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (e *Event) GetID() string { - if e == nil || e.ID == nil { - return "" - } - return *e.ID -} - -// GetOrg returns the Org field. -func (e *Event) GetOrg() *Organization { - if e == nil { - return nil - } - return e.Org -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (e *Event) GetPublic() bool { - if e == nil || e.Public == nil { - return false - } - return *e.Public -} - -// GetRawPayload returns the RawPayload field if it's non-nil, zero value otherwise. -func (e *Event) GetRawPayload() json.RawMessage { - if e == nil || e.RawPayload == nil { - return json.RawMessage{} - } - return *e.RawPayload -} - -// GetRepo returns the Repo field. -func (e *Event) GetRepo() *Repository { - if e == nil { - return nil - } - return e.Repo -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (e *Event) GetType() string { - if e == nil || e.Type == nil { - return "" - } - return *e.Type -} - -// GetHRef returns the HRef field if it's non-nil, zero value otherwise. -func (f *FeedLink) GetHRef() string { - if f == nil || f.HRef == nil { - return "" - } - return *f.HRef -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (f *FeedLink) GetType() string { - if f == nil || f.Type == nil { - return "" - } - return *f.Type -} - -// GetCurrentUserActorURL returns the CurrentUserActorURL field if it's non-nil, zero value otherwise. 
-func (f *Feeds) GetCurrentUserActorURL() string { - if f == nil || f.CurrentUserActorURL == nil { - return "" - } - return *f.CurrentUserActorURL -} - -// GetCurrentUserOrganizationURL returns the CurrentUserOrganizationURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserOrganizationURL() string { - if f == nil || f.CurrentUserOrganizationURL == nil { - return "" - } - return *f.CurrentUserOrganizationURL -} - -// GetCurrentUserPublicURL returns the CurrentUserPublicURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserPublicURL() string { - if f == nil || f.CurrentUserPublicURL == nil { - return "" - } - return *f.CurrentUserPublicURL -} - -// GetCurrentUserURL returns the CurrentUserURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserURL() string { - if f == nil || f.CurrentUserURL == nil { - return "" - } - return *f.CurrentUserURL -} - -// GetTimelineURL returns the TimelineURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetTimelineURL() string { - if f == nil || f.TimelineURL == nil { - return "" - } - return *f.TimelineURL -} - -// GetUserURL returns the UserURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetUserURL() string { - if f == nil || f.UserURL == nil { - return "" - } - return *f.UserURL -} - -// GetForkee returns the Forkee field. -func (f *ForkEvent) GetForkee() *Repository { - if f == nil { - return nil - } - return f.Forkee -} - -// GetInstallation returns the Installation field. -func (f *ForkEvent) GetInstallation() *Installation { - if f == nil { - return nil - } - return f.Installation -} - -// GetRepo returns the Repo field. -func (f *ForkEvent) GetRepo() *Repository { - if f == nil { - return nil - } - return f.Repo -} - -// GetSender returns the Sender field. -func (f *ForkEvent) GetSender() *User { - if f == nil { - return nil - } - return f.Sender -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (g *Gist) GetComments() int { - if g == nil || g.Comments == nil { - return 0 - } - return *g.Comments -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *Gist) GetCreatedAt() time.Time { - if g == nil || g.CreatedAt == nil { - return time.Time{} - } - return *g.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (g *Gist) GetDescription() string { - if g == nil || g.Description == nil { - return "" - } - return *g.Description -} - -// GetGitPullURL returns the GitPullURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetGitPullURL() string { - if g == nil || g.GitPullURL == nil { - return "" - } - return *g.GitPullURL -} - -// GetGitPushURL returns the GitPushURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetGitPushURL() string { - if g == nil || g.GitPushURL == nil { - return "" - } - return *g.GitPushURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetHTMLURL() string { - if g == nil || g.HTMLURL == nil { - return "" - } - return *g.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *Gist) GetID() string { - if g == nil || g.ID == nil { - return "" - } - return *g.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. 
-func (g *Gist) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetOwner returns the Owner field. -func (g *Gist) GetOwner() *User { - if g == nil { - return nil - } - return g.Owner -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (g *Gist) GetPublic() bool { - if g == nil || g.Public == nil { - return false - } - return *g.Public -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *Gist) GetUpdatedAt() time.Time { - if g == nil || g.UpdatedAt == nil { - return time.Time{} - } - return *g.UpdatedAt -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (g *GistComment) GetBody() string { - if g == nil || g.Body == nil { - return "" - } - return *g.Body -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GistComment) GetCreatedAt() time.Time { - if g == nil || g.CreatedAt == nil { - return time.Time{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GistComment) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistComment) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistComment) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetChangeStatus returns the ChangeStatus field. -func (g *GistCommit) GetChangeStatus() *CommitStats { - if g == nil { - return nil - } - return g.ChangeStatus -} - -// GetCommittedAt returns the CommittedAt field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetCommittedAt() Timestamp { - if g == nil || g.CommittedAt == nil { - return Timestamp{} - } - return *g.CommittedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistCommit) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetVersion returns the Version field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetVersion() string { - if g == nil || g.Version == nil { - return "" - } - return *g.Version -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (g *GistFile) GetContent() string { - if g == nil || g.Content == nil { - return "" - } - return *g.Content -} - -// GetFilename returns the Filename field if it's non-nil, zero value otherwise. -func (g *GistFile) GetFilename() string { - if g == nil || g.Filename == nil { - return "" - } - return *g.Filename -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (g *GistFile) GetLanguage() string { - if g == nil || g.Language == nil { - return "" - } - return *g.Language -} - -// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise. -func (g *GistFile) GetRawURL() string { - if g == nil || g.RawURL == nil { - return "" - } - return *g.RawURL -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. 
-func (g *GistFile) GetSize() int { - if g == nil || g.Size == nil { - return 0 - } - return *g.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (g *GistFile) GetType() string { - if g == nil || g.Type == nil { - return "" - } - return *g.Type -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GistFork) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GistFork) GetID() string { - if g == nil || g.ID == nil { - return "" - } - return *g.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (g *GistFork) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *GistFork) GetUpdatedAt() Timestamp { - if g == nil || g.UpdatedAt == nil { - return Timestamp{} - } - return *g.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistFork) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistFork) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (g *GistStats) GetPrivateGists() int { - if g == nil || g.PrivateGists == nil { - return 0 - } - return *g.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. -func (g *GistStats) GetPublicGists() int { - if g == nil || g.PublicGists == nil { - return 0 - } - return *g.PublicGists -} - -// GetTotalGists returns the TotalGists field if it's non-nil, zero value otherwise. -func (g *GistStats) GetTotalGists() int { - if g == nil || g.TotalGists == nil { - return 0 - } - return *g.TotalGists -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (g *GitHubAppAuthorizationEvent) GetAction() string { - if g == nil || g.Action == nil { - return "" - } - return *g.Action -} - -// GetSender returns the Sender field. -func (g *GitHubAppAuthorizationEvent) GetSender() *User { - if g == nil { - return nil - } - return g.Sender -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (g *Gitignore) GetName() string { - if g == nil || g.Name == nil { - return "" - } - return *g.Name -} - -// GetSource returns the Source field if it's non-nil, zero value otherwise. -func (g *Gitignore) GetSource() string { - if g == nil || g.Source == nil { - return "" - } - return *g.Source -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (g *GitObject) GetSHA() string { - if g == nil || g.SHA == nil { - return "" - } - return *g.SHA -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (g *GitObject) GetType() string { - if g == nil || g.Type == nil { - return "" - } - return *g.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GitObject) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetInstallation returns the Installation field. -func (g *GollumEvent) GetInstallation() *Installation { - if g == nil { - return nil - } - return g.Installation -} - -// GetRepo returns the Repo field. 
-func (g *GollumEvent) GetRepo() *Repository { - if g == nil { - return nil - } - return g.Repo -} - -// GetSender returns the Sender field. -func (g *GollumEvent) GetSender() *User { - if g == nil { - return nil - } - return g.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (g *GPGEmail) GetEmail() string { - if g == nil || g.Email == nil { - return "" - } - return *g.Email -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (g *GPGEmail) GetVerified() bool { - if g == nil || g.Verified == nil { - return false - } - return *g.Verified -} - -// GetCanCertify returns the CanCertify field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanCertify() bool { - if g == nil || g.CanCertify == nil { - return false - } - return *g.CanCertify -} - -// GetCanEncryptComms returns the CanEncryptComms field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanEncryptComms() bool { - if g == nil || g.CanEncryptComms == nil { - return false - } - return *g.CanEncryptComms -} - -// GetCanEncryptStorage returns the CanEncryptStorage field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanEncryptStorage() bool { - if g == nil || g.CanEncryptStorage == nil { - return false - } - return *g.CanEncryptStorage -} - -// GetCanSign returns the CanSign field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanSign() bool { - if g == nil || g.CanSign == nil { - return false - } - return *g.CanSign -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCreatedAt() time.Time { - if g == nil || g.CreatedAt == nil { - return time.Time{} - } - return *g.CreatedAt -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetExpiresAt() time.Time { - if g == nil || g.ExpiresAt == nil { - return time.Time{} - } - return *g.ExpiresAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetKeyID returns the KeyID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetKeyID() string { - if g == nil || g.KeyID == nil { - return "" - } - return *g.KeyID -} - -// GetPrimaryKeyID returns the PrimaryKeyID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetPrimaryKeyID() int64 { - if g == nil || g.PrimaryKeyID == nil { - return 0 - } - return *g.PrimaryKeyID -} - -// GetPublicKey returns the PublicKey field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetPublicKey() string { - if g == nil || g.PublicKey == nil { - return "" - } - return *g.PublicKey -} - -// GetApp returns the App field. -func (g *Grant) GetApp() *AuthorizationApp { - if g == nil { - return nil - } - return g.App -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *Grant) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *Grant) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. 
-func (g *Grant) GetUpdatedAt() Timestamp { - if g == nil || g.UpdatedAt == nil { - return Timestamp{} - } - return *g.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *Grant) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetActive returns the Active field if it's non-nil, zero value otherwise. -func (h *Hook) GetActive() bool { - if h == nil || h.Active == nil { - return false - } - return *h.Active -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetCreatedAt() time.Time { - if h == nil || h.CreatedAt == nil { - return time.Time{} - } - return *h.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (h *Hook) GetID() int64 { - if h == nil || h.ID == nil { - return 0 - } - return *h.ID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetUpdatedAt() time.Time { - if h == nil || h.UpdatedAt == nil { - return time.Time{} - } - return *h.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (h *Hook) GetURL() string { - if h == nil || h.URL == nil { - return "" - } - return *h.URL -} - -// GetActiveHooks returns the ActiveHooks field if it's non-nil, zero value otherwise. -func (h *HookStats) GetActiveHooks() int { - if h == nil || h.ActiveHooks == nil { - return 0 - } - return *h.ActiveHooks -} - -// GetInactiveHooks returns the InactiveHooks field if it's non-nil, zero value otherwise. -func (h *HookStats) GetInactiveHooks() int { - if h == nil || h.InactiveHooks == nil { - return 0 - } - return *h.InactiveHooks -} - -// GetTotalHooks returns the TotalHooks field if it's non-nil, zero value otherwise. -func (h *HookStats) GetTotalHooks() int { - if h == nil || h.TotalHooks == nil { - return 0 - } - return *h.TotalHooks -} - -// GetAuthorsCount returns the AuthorsCount field if it's non-nil, zero value otherwise. -func (i *Import) GetAuthorsCount() int { - if i == nil || i.AuthorsCount == nil { - return 0 - } - return *i.AuthorsCount -} - -// GetAuthorsURL returns the AuthorsURL field if it's non-nil, zero value otherwise. -func (i *Import) GetAuthorsURL() string { - if i == nil || i.AuthorsURL == nil { - return "" - } - return *i.AuthorsURL -} - -// GetCommitCount returns the CommitCount field if it's non-nil, zero value otherwise. -func (i *Import) GetCommitCount() int { - if i == nil || i.CommitCount == nil { - return 0 - } - return *i.CommitCount -} - -// GetFailedStep returns the FailedStep field if it's non-nil, zero value otherwise. -func (i *Import) GetFailedStep() string { - if i == nil || i.FailedStep == nil { - return "" - } - return *i.FailedStep -} - -// GetHasLargeFiles returns the HasLargeFiles field if it's non-nil, zero value otherwise. -func (i *Import) GetHasLargeFiles() bool { - if i == nil || i.HasLargeFiles == nil { - return false - } - return *i.HasLargeFiles -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *Import) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetHumanName returns the HumanName field if it's non-nil, zero value otherwise. -func (i *Import) GetHumanName() string { - if i == nil || i.HumanName == nil { - return "" - } - return *i.HumanName -} - -// GetLargeFilesCount returns the LargeFilesCount field if it's non-nil, zero value otherwise. 
-func (i *Import) GetLargeFilesCount() int { - if i == nil || i.LargeFilesCount == nil { - return 0 - } - return *i.LargeFilesCount -} - -// GetLargeFilesSize returns the LargeFilesSize field if it's non-nil, zero value otherwise. -func (i *Import) GetLargeFilesSize() int { - if i == nil || i.LargeFilesSize == nil { - return 0 - } - return *i.LargeFilesSize -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (i *Import) GetMessage() string { - if i == nil || i.Message == nil { - return "" - } - return *i.Message -} - -// GetPercent returns the Percent field if it's non-nil, zero value otherwise. -func (i *Import) GetPercent() int { - if i == nil || i.Percent == nil { - return 0 - } - return *i.Percent -} - -// GetPushPercent returns the PushPercent field if it's non-nil, zero value otherwise. -func (i *Import) GetPushPercent() int { - if i == nil || i.PushPercent == nil { - return 0 - } - return *i.PushPercent -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (i *Import) GetRepositoryURL() string { - if i == nil || i.RepositoryURL == nil { - return "" - } - return *i.RepositoryURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (i *Import) GetStatus() string { - if i == nil || i.Status == nil { - return "" - } - return *i.Status -} - -// GetStatusText returns the StatusText field if it's non-nil, zero value otherwise. -func (i *Import) GetStatusText() string { - if i == nil || i.StatusText == nil { - return "" - } - return *i.StatusText -} - -// GetTFVCProject returns the TFVCProject field if it's non-nil, zero value otherwise. -func (i *Import) GetTFVCProject() string { - if i == nil || i.TFVCProject == nil { - return "" - } - return *i.TFVCProject -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *Import) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUseLFS returns the UseLFS field if it's non-nil, zero value otherwise. -func (i *Import) GetUseLFS() string { - if i == nil || i.UseLFS == nil { - return "" - } - return *i.UseLFS -} - -// GetVCS returns the VCS field if it's non-nil, zero value otherwise. -func (i *Import) GetVCS() string { - if i == nil || i.VCS == nil { - return "" - } - return *i.VCS -} - -// GetVCSPassword returns the VCSPassword field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSPassword() string { - if i == nil || i.VCSPassword == nil { - return "" - } - return *i.VCSPassword -} - -// GetVCSURL returns the VCSURL field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSURL() string { - if i == nil || i.VCSURL == nil { - return "" - } - return *i.VCSURL -} - -// GetVCSUsername returns the VCSUsername field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSUsername() string { - if i == nil || i.VCSUsername == nil { - return "" - } - return *i.VCSUsername -} - -// GetAccessTokensURL returns the AccessTokensURL field if it's non-nil, zero value otherwise. -func (i *Installation) GetAccessTokensURL() string { - if i == nil || i.AccessTokensURL == nil { - return "" - } - return *i.AccessTokensURL -} - -// GetAccount returns the Account field. -func (i *Installation) GetAccount() *User { - if i == nil { - return nil - } - return i.Account -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. 
-func (i *Installation) GetAppID() int64 { - if i == nil || i.AppID == nil { - return 0 - } - return *i.AppID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Installation) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *Installation) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Installation) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetPermissions returns the Permissions field. -func (i *Installation) GetPermissions() *InstallationPermissions { - if i == nil { - return nil - } - return i.Permissions -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (i *Installation) GetRepositoriesURL() string { - if i == nil || i.RepositoriesURL == nil { - return "" - } - return *i.RepositoriesURL -} - -// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise. -func (i *Installation) GetRepositorySelection() string { - if i == nil || i.RepositorySelection == nil { - return "" - } - return *i.RepositorySelection -} - -// GetSingleFileName returns the SingleFileName field if it's non-nil, zero value otherwise. -func (i *Installation) GetSingleFileName() string { - if i == nil || i.SingleFileName == nil { - return "" - } - return *i.SingleFileName -} - -// GetTargetID returns the TargetID field if it's non-nil, zero value otherwise. -func (i *Installation) GetTargetID() int64 { - if i == nil || i.TargetID == nil { - return 0 - } - return *i.TargetID -} - -// GetTargetType returns the TargetType field if it's non-nil, zero value otherwise. -func (i *Installation) GetTargetType() string { - if i == nil || i.TargetType == nil { - return "" - } - return *i.TargetType -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *Installation) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *InstallationEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetInstallation returns the Installation field. -func (i *InstallationEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetSender returns the Sender field. -func (i *InstallationEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetContents returns the Contents field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetContents() string { - if i == nil || i.Contents == nil { - return "" - } - return *i.Contents -} - -// GetIssues returns the Issues field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetIssues() string { - if i == nil || i.Issues == nil { - return "" - } - return *i.Issues -} - -// GetMetadata returns the Metadata field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetMetadata() string { - if i == nil || i.Metadata == nil { - return "" - } - return *i.Metadata -} - -// GetSingleFile returns the SingleFile field if it's non-nil, zero value otherwise. 
-func (i *InstallationPermissions) GetSingleFile() string { - if i == nil || i.SingleFile == nil { - return "" - } - return *i.SingleFile -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *InstallationRepositoriesEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetInstallation returns the Installation field. -func (i *InstallationRepositoriesEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise. -func (i *InstallationRepositoriesEvent) GetRepositorySelection() string { - if i == nil || i.RepositorySelection == nil { - return "" - } - return *i.RepositorySelection -} - -// GetSender returns the Sender field. -func (i *InstallationRepositoriesEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (i *InstallationToken) GetExpiresAt() time.Time { - if i == nil || i.ExpiresAt == nil { - return time.Time{} - } - return *i.ExpiresAt -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. -func (i *InstallationToken) GetToken() string { - if i == nil || i.Token == nil { - return "" - } - return *i.Token -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Invitation) GetCreatedAt() time.Time { - if i == nil || i.CreatedAt == nil { - return time.Time{} - } - return *i.CreatedAt -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (i *Invitation) GetEmail() string { - if i == nil || i.Email == nil { - return "" - } - return *i.Email -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Invitation) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetInvitationTeamURL returns the InvitationTeamURL field if it's non-nil, zero value otherwise. -func (i *Invitation) GetInvitationTeamURL() string { - if i == nil || i.InvitationTeamURL == nil { - return "" - } - return *i.InvitationTeamURL -} - -// GetInviter returns the Inviter field. -func (i *Invitation) GetInviter() *User { - if i == nil { - return nil - } - return i.Inviter -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (i *Invitation) GetLogin() string { - if i == nil || i.Login == nil { - return "" - } - return *i.Login -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (i *Invitation) GetRole() string { - if i == nil || i.Role == nil { - return "" - } - return *i.Role -} - -// GetTeamCount returns the TeamCount field if it's non-nil, zero value otherwise. -func (i *Invitation) GetTeamCount() int { - if i == nil || i.TeamCount == nil { - return 0 - } - return *i.TeamCount -} - -// GetActiveLockReason returns the ActiveLockReason field if it's non-nil, zero value otherwise. -func (i *Issue) GetActiveLockReason() string { - if i == nil || i.ActiveLockReason == nil { - return "" - } - return *i.ActiveLockReason -} - -// GetAssignee returns the Assignee field. -func (i *Issue) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. 
-func (i *Issue) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetClosedAt() time.Time { - if i == nil || i.ClosedAt == nil { - return time.Time{} - } - return *i.ClosedAt -} - -// GetClosedBy returns the ClosedBy field. -func (i *Issue) GetClosedBy() *User { - if i == nil { - return nil - } - return i.ClosedBy -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (i *Issue) GetComments() int { - if i == nil || i.Comments == nil { - return 0 - } - return *i.Comments -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetCommentsURL() string { - if i == nil || i.CommentsURL == nil { - return "" - } - return *i.CommentsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetCreatedAt() time.Time { - if i == nil || i.CreatedAt == nil { - return time.Time{} - } - return *i.CreatedAt -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetEventsURL() string { - if i == nil || i.EventsURL == nil { - return "" - } - return *i.EventsURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Issue) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetLabelsURL() string { - if i == nil || i.LabelsURL == nil { - return "" - } - return *i.LabelsURL -} - -// GetLocked returns the Locked field if it's non-nil, zero value otherwise. -func (i *Issue) GetLocked() bool { - if i == nil || i.Locked == nil { - return false - } - return *i.Locked -} - -// GetMilestone returns the Milestone field. -func (i *Issue) GetMilestone() *Milestone { - if i == nil { - return nil - } - return i.Milestone -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *Issue) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (i *Issue) GetNumber() int { - if i == nil || i.Number == nil { - return 0 - } - return *i.Number -} - -// GetPullRequestLinks returns the PullRequestLinks field. -func (i *Issue) GetPullRequestLinks() *PullRequestLinks { - if i == nil { - return nil - } - return i.PullRequestLinks -} - -// GetReactions returns the Reactions field. -func (i *Issue) GetReactions() *Reactions { - if i == nil { - return nil - } - return i.Reactions -} - -// GetRepository returns the Repository field. -func (i *Issue) GetRepository() *Repository { - if i == nil { - return nil - } - return i.Repository -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetRepositoryURL() string { - if i == nil || i.RepositoryURL == nil { - return "" - } - return *i.RepositoryURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
-func (i *Issue) GetState() string { - if i == nil || i.State == nil { - return "" - } - return *i.State -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (i *Issue) GetTitle() string { - if i == nil || i.Title == nil { - return "" - } - return *i.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetUpdatedAt() time.Time { - if i == nil || i.UpdatedAt == nil { - return time.Time{} - } - return *i.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *Issue) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUser returns the User field. -func (i *Issue) GetUser() *User { - if i == nil { - return nil - } - return i.User -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetAuthorAssociation() string { - if i == nil || i.AuthorAssociation == nil { - return "" - } - return *i.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetCreatedAt() time.Time { - if i == nil || i.CreatedAt == nil { - return time.Time{} - } - return *i.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetIssueURL() string { - if i == nil || i.IssueURL == nil { - return "" - } - return *i.IssueURL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetReactions returns the Reactions field. -func (i *IssueComment) GetReactions() *Reactions { - if i == nil { - return nil - } - return i.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetUpdatedAt() time.Time { - if i == nil || i.UpdatedAt == nil { - return time.Time{} - } - return *i.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUser returns the User field. -func (i *IssueComment) GetUser() *User { - if i == nil { - return nil - } - return i.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *IssueCommentEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetChanges returns the Changes field. -func (i *IssueCommentEvent) GetChanges() *EditChange { - if i == nil { - return nil - } - return i.Changes -} - -// GetComment returns the Comment field. -func (i *IssueCommentEvent) GetComment() *IssueComment { - if i == nil { - return nil - } - return i.Comment -} - -// GetInstallation returns the Installation field. 
-func (i *IssueCommentEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetIssue returns the Issue field. -func (i *IssueCommentEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetRepo returns the Repo field. -func (i *IssueCommentEvent) GetRepo() *Repository { - if i == nil { - return nil - } - return i.Repo -} - -// GetSender returns the Sender field. -func (i *IssueCommentEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetActor returns the Actor field. -func (i *IssueEvent) GetActor() *User { - if i == nil { - return nil - } - return i.Actor -} - -// GetAssignee returns the Assignee field. -func (i *IssueEvent) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetAssigner returns the Assigner field. -func (i *IssueEvent) GetAssigner() *User { - if i == nil { - return nil - } - return i.Assigner -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetCommitID() string { - if i == nil || i.CommitID == nil { - return "" - } - return *i.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetCreatedAt() time.Time { - if i == nil || i.CreatedAt == nil { - return time.Time{} - } - return *i.CreatedAt -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetEvent() string { - if i == nil || i.Event == nil { - return "" - } - return *i.Event -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetIssue returns the Issue field. -func (i *IssueEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetLabel returns the Label field. -func (i *IssueEvent) GetLabel() *Label { - if i == nil { - return nil - } - return i.Label -} - -// GetLockReason returns the LockReason field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetLockReason() string { - if i == nil || i.LockReason == nil { - return "" - } - return *i.LockReason -} - -// GetMilestone returns the Milestone field. -func (i *IssueEvent) GetMilestone() *Milestone { - if i == nil { - return nil - } - return i.Milestone -} - -// GetProjectCard returns the ProjectCard field. -func (i *IssueEvent) GetProjectCard() *ProjectCard { - if i == nil { - return nil - } - return i.ProjectCard -} - -// GetRename returns the Rename field. -func (i *IssueEvent) GetRename() *Rename { - if i == nil { - return nil - } - return i.Rename -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetAssignee returns the Assignee field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetAssignee() string { - if i == nil || i.Assignee == nil { - return "" - } - return *i.Assignee -} - -// GetAssignees returns the Assignees field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetAssignees() []string { - if i == nil || i.Assignees == nil { - return nil - } - return *i.Assignees -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. 
-func (i *IssueRequest) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetLabels returns the Labels field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetLabels() []string { - if i == nil || i.Labels == nil { - return nil - } - return *i.Labels -} - -// GetMilestone returns the Milestone field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetMilestone() int { - if i == nil || i.Milestone == nil { - return 0 - } - return *i.Milestone -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetState() string { - if i == nil || i.State == nil { - return "" - } - return *i.State -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetTitle() string { - if i == nil || i.Title == nil { - return "" - } - return *i.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *IssuesEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetAssignee returns the Assignee field. -func (i *IssuesEvent) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetChanges returns the Changes field. -func (i *IssuesEvent) GetChanges() *EditChange { - if i == nil { - return nil - } - return i.Changes -} - -// GetInstallation returns the Installation field. -func (i *IssuesEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetIssue returns the Issue field. -func (i *IssuesEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetLabel returns the Label field. -func (i *IssuesEvent) GetLabel() *Label { - if i == nil { - return nil - } - return i.Label -} - -// GetRepo returns the Repo field. -func (i *IssuesEvent) GetRepo() *Repository { - if i == nil { - return nil - } - return i.Repo -} - -// GetSender returns the Sender field. -func (i *IssuesEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (i *IssuesSearchResult) GetIncompleteResults() bool { - if i == nil || i.IncompleteResults == nil { - return false - } - return *i.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (i *IssuesSearchResult) GetTotal() int { - if i == nil || i.Total == nil { - return 0 - } - return *i.Total -} - -// GetClosedIssues returns the ClosedIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetClosedIssues() int { - if i == nil || i.ClosedIssues == nil { - return 0 - } - return *i.ClosedIssues -} - -// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetOpenIssues() int { - if i == nil || i.OpenIssues == nil { - return 0 - } - return *i.OpenIssues -} - -// GetTotalIssues returns the TotalIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetTotalIssues() int { - if i == nil || i.TotalIssues == nil { - return 0 - } - return *i.TotalIssues -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (k *Key) GetID() int64 { - if k == nil || k.ID == nil { - return 0 - } - return *k.ID -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. 
-func (k *Key) GetKey() string { - if k == nil || k.Key == nil { - return "" - } - return *k.Key -} - -// GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise. -func (k *Key) GetReadOnly() bool { - if k == nil || k.ReadOnly == nil { - return false - } - return *k.ReadOnly -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (k *Key) GetTitle() string { - if k == nil || k.Title == nil { - return "" - } - return *k.Title -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (k *Key) GetURL() string { - if k == nil || k.URL == nil { - return "" - } - return *k.URL -} - -// GetColor returns the Color field if it's non-nil, zero value otherwise. -func (l *Label) GetColor() string { - if l == nil || l.Color == nil { - return "" - } - return *l.Color -} - -// GetDefault returns the Default field if it's non-nil, zero value otherwise. -func (l *Label) GetDefault() bool { - if l == nil || l.Default == nil { - return false - } - return *l.Default -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (l *Label) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (l *Label) GetID() int64 { - if l == nil || l.ID == nil { - return 0 - } - return *l.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *Label) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (l *Label) GetNodeID() string { - if l == nil || l.NodeID == nil { - return "" - } - return *l.NodeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *Label) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (l *LabelEvent) GetAction() string { - if l == nil || l.Action == nil { - return "" - } - return *l.Action -} - -// GetChanges returns the Changes field. -func (l *LabelEvent) GetChanges() *EditChange { - if l == nil { - return nil - } - return l.Changes -} - -// GetInstallation returns the Installation field. -func (l *LabelEvent) GetInstallation() *Installation { - if l == nil { - return nil - } - return l.Installation -} - -// GetLabel returns the Label field. -func (l *LabelEvent) GetLabel() *Label { - if l == nil { - return nil - } - return l.Label -} - -// GetOrg returns the Org field. -func (l *LabelEvent) GetOrg() *Organization { - if l == nil { - return nil - } - return l.Org -} - -// GetRepo returns the Repo field. -func (l *LabelEvent) GetRepo() *Repository { - if l == nil { - return nil - } - return l.Repo -} - -// GetColor returns the Color field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetColor() string { - if l == nil || l.Color == nil { - return "" - } - return *l.Color -} - -// GetDefault returns the Default field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetDefault() bool { - if l == nil || l.Default == nil { - return false - } - return *l.Default -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. 
-func (l *LabelResult) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetID() int64 { - if l == nil || l.ID == nil { - return 0 - } - return *l.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetScore returns the Score field. -func (l *LabelResult) GetScore() *float64 { - if l == nil { - return nil - } - return l.Score -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (l *LabelsSearchResult) GetIncompleteResults() bool { - if l == nil || l.IncompleteResults == nil { - return false - } - return *l.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *LabelsSearchResult) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetOID returns the OID field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetOID() string { - if l == nil || l.OID == nil { - return "" - } - return *l.OID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetPath() string { - if l == nil || l.Path == nil { - return "" - } - return *l.Path -} - -// GetRefName returns the RefName field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetRefName() string { - if l == nil || l.RefName == nil { - return "" - } - return *l.RefName -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetSize() int { - if l == nil || l.Size == nil { - return 0 - } - return *l.Size -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (l *License) GetBody() string { - if l == nil || l.Body == nil { - return "" - } - return *l.Body -} - -// GetConditions returns the Conditions field if it's non-nil, zero value otherwise. -func (l *License) GetConditions() []string { - if l == nil || l.Conditions == nil { - return nil - } - return *l.Conditions -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (l *License) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetFeatured returns the Featured field if it's non-nil, zero value otherwise. -func (l *License) GetFeatured() bool { - if l == nil || l.Featured == nil { - return false - } - return *l.Featured -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (l *License) GetHTMLURL() string { - if l == nil || l.HTMLURL == nil { - return "" - } - return *l.HTMLURL -} - -// GetImplementation returns the Implementation field if it's non-nil, zero value otherwise. -func (l *License) GetImplementation() string { - if l == nil || l.Implementation == nil { - return "" - } - return *l.Implementation -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (l *License) GetKey() string { - if l == nil || l.Key == nil { - return "" - } - return *l.Key -} - -// GetLimitations returns the Limitations field if it's non-nil, zero value otherwise. 
-func (l *License) GetLimitations() []string { - if l == nil || l.Limitations == nil { - return nil - } - return *l.Limitations -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *License) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. -func (l *License) GetPermissions() []string { - if l == nil || l.Permissions == nil { - return nil - } - return *l.Permissions -} - -// GetSPDXID returns the SPDXID field if it's non-nil, zero value otherwise. -func (l *License) GetSPDXID() string { - if l == nil || l.SPDXID == nil { - return "" - } - return *l.SPDXID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *License) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetCheckName returns the CheckName field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetCheckName() string { - if l == nil || l.CheckName == nil { - return "" - } - return *l.CheckName -} - -// GetFilter returns the Filter field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetFilter() string { - if l == nil || l.Filter == nil { - return "" - } - return *l.Filter -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetStatus() string { - if l == nil || l.Status == nil { - return "" - } - return *l.Status -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsResults) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteOptions) GetAppID() int { - if l == nil || l.AppID == nil { - return 0 - } - return *l.AppID -} - -// GetCheckName returns the CheckName field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteOptions) GetCheckName() string { - if l == nil || l.CheckName == nil { - return "" - } - return *l.CheckName -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteResults) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetEffectiveDate() Timestamp { - if m == nil || m.EffectiveDate == nil { - return Timestamp{} - } - return *m.EffectiveDate -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetPlan returns the Plan field. -func (m *MarketplacePendingChange) GetPlan() *MarketplacePlan { - if m == nil { - return nil - } - return m.Plan -} - -// GetUnitCount returns the UnitCount field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetUnitCount() int { - if m == nil || m.UnitCount == nil { - return 0 - } - return *m.UnitCount -} - -// GetAccountsURL returns the AccountsURL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetAccountsURL() string { - if m == nil || m.AccountsURL == nil { - return "" - } - return *m.AccountsURL -} - -// GetBullets returns the Bullets field if it's non-nil, zero value otherwise. 
-func (m *MarketplacePlan) GetBullets() []string { - if m == nil || m.Bullets == nil { - return nil - } - return *m.Bullets -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetDescription() string { - if m == nil || m.Description == nil { - return "" - } - return *m.Description -} - -// GetHasFreeTrial returns the HasFreeTrial field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetHasFreeTrial() bool { - if m == nil || m.HasFreeTrial == nil { - return false - } - return *m.HasFreeTrial -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetMonthlyPriceInCents returns the MonthlyPriceInCents field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetMonthlyPriceInCents() int { - if m == nil || m.MonthlyPriceInCents == nil { - return 0 - } - return *m.MonthlyPriceInCents -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetName() string { - if m == nil || m.Name == nil { - return "" - } - return *m.Name -} - -// GetPriceModel returns the PriceModel field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetPriceModel() string { - if m == nil || m.PriceModel == nil { - return "" - } - return *m.PriceModel -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetUnitName returns the UnitName field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetUnitName() string { - if m == nil || m.UnitName == nil { - return "" - } - return *m.UnitName -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetYearlyPriceInCents returns the YearlyPriceInCents field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetYearlyPriceInCents() int { - if m == nil || m.YearlyPriceInCents == nil { - return 0 - } - return *m.YearlyPriceInCents -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetEmail() string { - if m == nil || m.Email == nil { - return "" - } - return *m.Email -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetLogin() string { - if m == nil || m.Login == nil { - return "" - } - return *m.Login -} - -// GetMarketplacePendingChange returns the MarketplacePendingChange field. -func (m *MarketplacePlanAccount) GetMarketplacePendingChange() *MarketplacePendingChange { - if m == nil { - return nil - } - return m.MarketplacePendingChange -} - -// GetMarketplacePurchase returns the MarketplacePurchase field. -func (m *MarketplacePlanAccount) GetMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.MarketplacePurchase -} - -// GetOrganizationBillingEmail returns the OrganizationBillingEmail field if it's non-nil, zero value otherwise. 
-func (m *MarketplacePlanAccount) GetOrganizationBillingEmail() string { - if m == nil || m.OrganizationBillingEmail == nil { - return "" - } - return *m.OrganizationBillingEmail -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetType() string { - if m == nil || m.Type == nil { - return "" - } - return *m.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetAccount returns the Account field. -func (m *MarketplacePurchase) GetAccount() *MarketplacePlanAccount { - if m == nil { - return nil - } - return m.Account -} - -// GetBillingCycle returns the BillingCycle field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetBillingCycle() string { - if m == nil || m.BillingCycle == nil { - return "" - } - return *m.BillingCycle -} - -// GetFreeTrialEndsOn returns the FreeTrialEndsOn field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetFreeTrialEndsOn() Timestamp { - if m == nil || m.FreeTrialEndsOn == nil { - return Timestamp{} - } - return *m.FreeTrialEndsOn -} - -// GetNextBillingDate returns the NextBillingDate field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetNextBillingDate() Timestamp { - if m == nil || m.NextBillingDate == nil { - return Timestamp{} - } - return *m.NextBillingDate -} - -// GetOnFreeTrial returns the OnFreeTrial field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetOnFreeTrial() bool { - if m == nil || m.OnFreeTrial == nil { - return false - } - return *m.OnFreeTrial -} - -// GetPlan returns the Plan field. -func (m *MarketplacePurchase) GetPlan() *MarketplacePlan { - if m == nil { - return nil - } - return m.Plan -} - -// GetUnitCount returns the UnitCount field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetUnitCount() int { - if m == nil || m.UnitCount == nil { - return 0 - } - return *m.UnitCount -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseEvent) GetEffectiveDate() Timestamp { - if m == nil || m.EffectiveDate == nil { - return Timestamp{} - } - return *m.EffectiveDate -} - -// GetInstallation returns the Installation field. -func (m *MarketplacePurchaseEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMarketplacePurchase returns the MarketplacePurchase field. -func (m *MarketplacePurchaseEvent) GetMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.MarketplacePurchase -} - -// GetPreviousMarketplacePurchase returns the PreviousMarketplacePurchase field. -func (m *MarketplacePurchaseEvent) GetPreviousMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.PreviousMarketplacePurchase -} - -// GetSender returns the Sender field. -func (m *MarketplacePurchaseEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetText returns the Text field if it's non-nil, zero value otherwise. 
-func (m *Match) GetText() string { - if m == nil || m.Text == nil { - return "" - } - return *m.Text -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MemberEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetInstallation returns the Installation field. -func (m *MemberEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMember returns the Member field. -func (m *MemberEvent) GetMember() *User { - if m == nil { - return nil - } - return m.Member -} - -// GetRepo returns the Repo field. -func (m *MemberEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. -func (m *MemberEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetOrganization returns the Organization field. -func (m *Membership) GetOrganization() *Organization { - if m == nil { - return nil - } - return m.Organization -} - -// GetOrganizationURL returns the OrganizationURL field if it's non-nil, zero value otherwise. -func (m *Membership) GetOrganizationURL() string { - if m == nil || m.OrganizationURL == nil { - return "" - } - return *m.OrganizationURL -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (m *Membership) GetRole() string { - if m == nil || m.Role == nil { - return "" - } - return *m.Role -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Membership) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Membership) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetUser returns the User field. -func (m *Membership) GetUser() *User { - if m == nil { - return nil - } - return m.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MembershipEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetInstallation returns the Installation field. -func (m *MembershipEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMember returns the Member field. -func (m *MembershipEvent) GetMember() *User { - if m == nil { - return nil - } - return m.Member -} - -// GetOrg returns the Org field. -func (m *MembershipEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (m *MembershipEvent) GetScope() string { - if m == nil || m.Scope == nil { - return "" - } - return *m.Scope -} - -// GetSender returns the Sender field. -func (m *MembershipEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetTeam returns the Team field. -func (m *MembershipEvent) GetTeam() *Team { - if m == nil { - return nil - } - return m.Team -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (m *Metric) GetHTMLURL() string { - if m == nil || m.HTMLURL == nil { - return "" - } - return *m.HTMLURL -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. 
-func (m *Metric) GetKey() string { - if m == nil || m.Key == nil { - return "" - } - return *m.Key -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (m *Metric) GetName() string { - if m == nil || m.Name == nil { - return "" - } - return *m.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Metric) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (m *Migration) GetCreatedAt() string { - if m == nil || m.CreatedAt == nil { - return "" - } - return *m.CreatedAt -} - -// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise. -func (m *Migration) GetExcludeAttachments() bool { - if m == nil || m.ExcludeAttachments == nil { - return false - } - return *m.ExcludeAttachments -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (m *Migration) GetGUID() string { - if m == nil || m.GUID == nil { - return "" - } - return *m.GUID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *Migration) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise. -func (m *Migration) GetLockRepositories() bool { - if m == nil || m.LockRepositories == nil { - return false - } - return *m.LockRepositories -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Migration) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *Migration) GetUpdatedAt() string { - if m == nil || m.UpdatedAt == nil { - return "" - } - return *m.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Migration) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetClosedAt() time.Time { - if m == nil || m.ClosedAt == nil { - return time.Time{} - } - return *m.ClosedAt -} - -// GetClosedIssues returns the ClosedIssues field if it's non-nil, zero value otherwise. -func (m *Milestone) GetClosedIssues() int { - if m == nil || m.ClosedIssues == nil { - return 0 - } - return *m.ClosedIssues -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetCreatedAt() time.Time { - if m == nil || m.CreatedAt == nil { - return time.Time{} - } - return *m.CreatedAt -} - -// GetCreator returns the Creator field. -func (m *Milestone) GetCreator() *User { - if m == nil { - return nil - } - return m.Creator -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (m *Milestone) GetDescription() string { - if m == nil || m.Description == nil { - return "" - } - return *m.Description -} - -// GetDueOn returns the DueOn field if it's non-nil, zero value otherwise. -func (m *Milestone) GetDueOn() time.Time { - if m == nil || m.DueOn == nil { - return time.Time{} - } - return *m.DueOn -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (m *Milestone) GetHTMLURL() string { - if m == nil || m.HTMLURL == nil { - return "" - } - return *m.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *Milestone) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (m *Milestone) GetLabelsURL() string { - if m == nil || m.LabelsURL == nil { - return "" - } - return *m.LabelsURL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (m *Milestone) GetNodeID() string { - if m == nil || m.NodeID == nil { - return "" - } - return *m.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (m *Milestone) GetNumber() int { - if m == nil || m.Number == nil { - return 0 - } - return *m.Number -} - -// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise. -func (m *Milestone) GetOpenIssues() int { - if m == nil || m.OpenIssues == nil { - return 0 - } - return *m.OpenIssues -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Milestone) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (m *Milestone) GetTitle() string { - if m == nil || m.Title == nil { - return "" - } - return *m.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetUpdatedAt() time.Time { - if m == nil || m.UpdatedAt == nil { - return time.Time{} - } - return *m.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Milestone) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MilestoneEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetChanges returns the Changes field. -func (m *MilestoneEvent) GetChanges() *EditChange { - if m == nil { - return nil - } - return m.Changes -} - -// GetInstallation returns the Installation field. -func (m *MilestoneEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMilestone returns the Milestone field. -func (m *MilestoneEvent) GetMilestone() *Milestone { - if m == nil { - return nil - } - return m.Milestone -} - -// GetOrg returns the Org field. -func (m *MilestoneEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetRepo returns the Repo field. -func (m *MilestoneEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. -func (m *MilestoneEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetClosedMilestones returns the ClosedMilestones field if it's non-nil, zero value otherwise. -func (m *MilestoneStats) GetClosedMilestones() int { - if m == nil || m.ClosedMilestones == nil { - return 0 - } - return *m.ClosedMilestones -} - -// GetOpenMilestones returns the OpenMilestones field if it's non-nil, zero value otherwise. 
-func (m *MilestoneStats) GetOpenMilestones() int { - if m == nil || m.OpenMilestones == nil { - return 0 - } - return *m.OpenMilestones -} - -// GetTotalMilestones returns the TotalMilestones field if it's non-nil, zero value otherwise. -func (m *MilestoneStats) GetTotalMilestones() int { - if m == nil || m.TotalMilestones == nil { - return 0 - } - return *m.TotalMilestones -} - -// GetBase returns the Base field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetBase() string { - if n == nil || n.Base == nil { - return "" - } - return *n.Base -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetBody() string { - if n == nil || n.Body == nil { - return "" - } - return *n.Body -} - -// GetHead returns the Head field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetHead() string { - if n == nil || n.Head == nil { - return "" - } - return *n.Head -} - -// GetIssue returns the Issue field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetIssue() int { - if n == nil || n.Issue == nil { - return 0 - } - return *n.Issue -} - -// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetMaintainerCanModify() bool { - if n == nil || n.MaintainerCanModify == nil { - return false - } - return *n.MaintainerCanModify -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetTitle() string { - if n == nil || n.Title == nil { - return "" - } - return *n.Title -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetDescription() string { - if n == nil || n.Description == nil { - return "" - } - return *n.Description -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetLDAPDN() string { - if n == nil || n.LDAPDN == nil { - return "" - } - return *n.LDAPDN -} - -// GetParentTeamID returns the ParentTeamID field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetParentTeamID() int64 { - if n == nil || n.ParentTeamID == nil { - return 0 - } - return *n.ParentTeamID -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetPermission() string { - if n == nil || n.Permission == nil { - return "" - } - return *n.Permission -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetPrivacy() string { - if n == nil || n.Privacy == nil { - return "" - } - return *n.Privacy -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (n *Notification) GetID() string { - if n == nil || n.ID == nil { - return "" - } - return *n.ID -} - -// GetLastReadAt returns the LastReadAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetLastReadAt() time.Time { - if n == nil || n.LastReadAt == nil { - return time.Time{} - } - return *n.LastReadAt -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (n *Notification) GetReason() string { - if n == nil || n.Reason == nil { - return "" - } - return *n.Reason -} - -// GetRepository returns the Repository field. -func (n *Notification) GetRepository() *Repository { - if n == nil { - return nil - } - return n.Repository -} - -// GetSubject returns the Subject field. 
-func (n *Notification) GetSubject() *NotificationSubject { - if n == nil { - return nil - } - return n.Subject -} - -// GetUnread returns the Unread field if it's non-nil, zero value otherwise. -func (n *Notification) GetUnread() bool { - if n == nil || n.Unread == nil { - return false - } - return *n.Unread -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetUpdatedAt() time.Time { - if n == nil || n.UpdatedAt == nil { - return time.Time{} - } - return *n.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (n *Notification) GetURL() string { - if n == nil || n.URL == nil { - return "" - } - return *n.URL -} - -// GetLatestCommentURL returns the LatestCommentURL field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetLatestCommentURL() string { - if n == nil || n.LatestCommentURL == nil { - return "" - } - return *n.LatestCommentURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetTitle() string { - if n == nil || n.Title == nil { - return "" - } - return *n.Title -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetType() string { - if n == nil || n.Type == nil { - return "" - } - return *n.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetURL() string { - if n == nil || n.URL == nil { - return "" - } - return *n.URL -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetAvatarURL() string { - if o == nil || o.AvatarURL == nil { - return "" - } - return *o.AvatarURL -} - -// GetBillingEmail returns the BillingEmail field if it's non-nil, zero value otherwise. -func (o *Organization) GetBillingEmail() string { - if o == nil || o.BillingEmail == nil { - return "" - } - return *o.BillingEmail -} - -// GetBlog returns the Blog field if it's non-nil, zero value otherwise. -func (o *Organization) GetBlog() string { - if o == nil || o.Blog == nil { - return "" - } - return *o.Blog -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (o *Organization) GetCollaborators() int { - if o == nil || o.Collaborators == nil { - return 0 - } - return *o.Collaborators -} - -// GetCompany returns the Company field if it's non-nil, zero value otherwise. -func (o *Organization) GetCompany() string { - if o == nil || o.Company == nil { - return "" - } - return *o.Company -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (o *Organization) GetCreatedAt() time.Time { - if o == nil || o.CreatedAt == nil { - return time.Time{} - } - return *o.CreatedAt -} - -// GetDefaultRepoPermission returns the DefaultRepoPermission field if it's non-nil, zero value otherwise. -func (o *Organization) GetDefaultRepoPermission() string { - if o == nil || o.DefaultRepoPermission == nil { - return "" - } - return *o.DefaultRepoPermission -} - -// GetDefaultRepoSettings returns the DefaultRepoSettings field if it's non-nil, zero value otherwise. -func (o *Organization) GetDefaultRepoSettings() string { - if o == nil || o.DefaultRepoSettings == nil { - return "" - } - return *o.DefaultRepoSettings -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetDescription() string { - if o == nil || o.Description == nil { - return "" - } - return *o.Description -} - -// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise. -func (o *Organization) GetDiskUsage() int { - if o == nil || o.DiskUsage == nil { - return 0 - } - return *o.DiskUsage -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (o *Organization) GetEmail() string { - if o == nil || o.Email == nil { - return "" - } - return *o.Email -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetEventsURL() string { - if o == nil || o.EventsURL == nil { - return "" - } - return *o.EventsURL -} - -// GetFollowers returns the Followers field if it's non-nil, zero value otherwise. -func (o *Organization) GetFollowers() int { - if o == nil || o.Followers == nil { - return 0 - } - return *o.Followers -} - -// GetFollowing returns the Following field if it's non-nil, zero value otherwise. -func (o *Organization) GetFollowing() int { - if o == nil || o.Following == nil { - return 0 - } - return *o.Following -} - -// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetHooksURL() string { - if o == nil || o.HooksURL == nil { - return "" - } - return *o.HooksURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetHTMLURL() string { - if o == nil || o.HTMLURL == nil { - return "" - } - return *o.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (o *Organization) GetID() int64 { - if o == nil || o.ID == nil { - return 0 - } - return *o.ID -} - -// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetIssuesURL() string { - if o == nil || o.IssuesURL == nil { - return "" - } - return *o.IssuesURL -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (o *Organization) GetLocation() string { - if o == nil || o.Location == nil { - return "" - } - return *o.Location -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (o *Organization) GetLogin() string { - if o == nil || o.Login == nil { - return "" - } - return *o.Login -} - -// GetMembersCanCreateRepos returns the MembersCanCreateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreateRepos() bool { - if o == nil || o.MembersCanCreateRepos == nil { - return false - } - return *o.MembersCanCreateRepos -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersURL() string { - if o == nil || o.MembersURL == nil { - return "" - } - return *o.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (o *Organization) GetName() string { - if o == nil || o.Name == nil { - return "" - } - return *o.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (o *Organization) GetNodeID() string { - if o == nil || o.NodeID == nil { - return "" - } - return *o.NodeID -} - -// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetOwnedPrivateRepos() int { - if o == nil || o.OwnedPrivateRepos == nil { - return 0 - } - return *o.OwnedPrivateRepos -} - -// GetPlan returns the Plan field. 
-func (o *Organization) GetPlan() *Plan { - if o == nil { - return nil - } - return o.Plan -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (o *Organization) GetPrivateGists() int { - if o == nil || o.PrivateGists == nil { - return 0 - } - return *o.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicGists() int { - if o == nil || o.PublicGists == nil { - return 0 - } - return *o.PublicGists -} - -// GetPublicMembersURL returns the PublicMembersURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicMembersURL() string { - if o == nil || o.PublicMembersURL == nil { - return "" - } - return *o.PublicMembersURL -} - -// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicRepos() int { - if o == nil || o.PublicRepos == nil { - return 0 - } - return *o.PublicRepos -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetReposURL() string { - if o == nil || o.ReposURL == nil { - return "" - } - return *o.ReposURL -} - -// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetTotalPrivateRepos() int { - if o == nil || o.TotalPrivateRepos == nil { - return 0 - } - return *o.TotalPrivateRepos -} - -// GetTwoFactorRequirementEnabled returns the TwoFactorRequirementEnabled field if it's non-nil, zero value otherwise. -func (o *Organization) GetTwoFactorRequirementEnabled() bool { - if o == nil || o.TwoFactorRequirementEnabled == nil { - return false - } - return *o.TwoFactorRequirementEnabled -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (o *Organization) GetType() string { - if o == nil || o.Type == nil { - return "" - } - return *o.Type -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (o *Organization) GetUpdatedAt() time.Time { - if o == nil || o.UpdatedAt == nil { - return time.Time{} - } - return *o.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (o *Organization) GetURL() string { - if o == nil || o.URL == nil { - return "" - } - return *o.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (o *OrganizationEvent) GetAction() string { - if o == nil || o.Action == nil { - return "" - } - return *o.Action -} - -// GetInstallation returns the Installation field. -func (o *OrganizationEvent) GetInstallation() *Installation { - if o == nil { - return nil - } - return o.Installation -} - -// GetInvitation returns the Invitation field. -func (o *OrganizationEvent) GetInvitation() *Invitation { - if o == nil { - return nil - } - return o.Invitation -} - -// GetMembership returns the Membership field. -func (o *OrganizationEvent) GetMembership() *Membership { - if o == nil { - return nil - } - return o.Membership -} - -// GetOrganization returns the Organization field. -func (o *OrganizationEvent) GetOrganization() *Organization { - if o == nil { - return nil - } - return o.Organization -} - -// GetSender returns the Sender field. -func (o *OrganizationEvent) GetSender() *User { - if o == nil { - return nil - } - return o.Sender -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. 
-func (o *OrgBlockEvent) GetAction() string { - if o == nil || o.Action == nil { - return "" - } - return *o.Action -} - -// GetBlockedUser returns the BlockedUser field. -func (o *OrgBlockEvent) GetBlockedUser() *User { - if o == nil { - return nil - } - return o.BlockedUser -} - -// GetInstallation returns the Installation field. -func (o *OrgBlockEvent) GetInstallation() *Installation { - if o == nil { - return nil - } - return o.Installation -} - -// GetOrganization returns the Organization field. -func (o *OrgBlockEvent) GetOrganization() *Organization { - if o == nil { - return nil - } - return o.Organization -} - -// GetSender returns the Sender field. -func (o *OrgBlockEvent) GetSender() *User { - if o == nil { - return nil - } - return o.Sender -} - -// GetDisabledOrgs returns the DisabledOrgs field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetDisabledOrgs() int { - if o == nil || o.DisabledOrgs == nil { - return 0 - } - return *o.DisabledOrgs -} - -// GetTotalOrgs returns the TotalOrgs field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalOrgs() int { - if o == nil || o.TotalOrgs == nil { - return 0 - } - return *o.TotalOrgs -} - -// GetTotalTeamMembers returns the TotalTeamMembers field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalTeamMembers() int { - if o == nil || o.TotalTeamMembers == nil { - return 0 - } - return *o.TotalTeamMembers -} - -// GetTotalTeams returns the TotalTeams field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalTeams() int { - if o == nil || o.TotalTeams == nil { - return 0 - } - return *o.TotalTeams -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *Page) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Page) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetPageName returns the PageName field if it's non-nil, zero value otherwise. -func (p *Page) GetPageName() string { - if p == nil || p.PageName == nil { - return "" - } - return *p.PageName -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *Page) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (p *Page) GetSummary() string { - if p == nil || p.Summary == nil { - return "" - } - return *p.Summary -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (p *Page) GetTitle() string { - if p == nil || p.Title == nil { - return "" - } - return *p.Title -} - -// GetBuild returns the Build field. -func (p *PageBuildEvent) GetBuild() *PagesBuild { - if p == nil { - return nil - } - return p.Build -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PageBuildEvent) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetInstallation returns the Installation field. -func (p *PageBuildEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetRepo returns the Repo field. -func (p *PageBuildEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. 
-func (p *PageBuildEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetCNAME returns the CNAME field if it's non-nil, zero value otherwise. -func (p *Pages) GetCNAME() string { - if p == nil || p.CNAME == nil { - return "" - } - return *p.CNAME -} - -// GetCustom404 returns the Custom404 field if it's non-nil, zero value otherwise. -func (p *Pages) GetCustom404() bool { - if p == nil || p.Custom404 == nil { - return false - } - return *p.Custom404 -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Pages) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (p *Pages) GetStatus() string { - if p == nil || p.Status == nil { - return "" - } - return *p.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Pages) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetCommit returns the Commit field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetCommit() string { - if p == nil || p.Commit == nil { - return "" - } - return *p.Commit -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDuration returns the Duration field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetDuration() int { - if p == nil || p.Duration == nil { - return 0 - } - return *p.Duration -} - -// GetError returns the Error field. -func (p *PagesBuild) GetError() *PagesError { - if p == nil { - return nil - } - return p.Error -} - -// GetPusher returns the Pusher field. -func (p *PagesBuild) GetPusher() *User { - if p == nil { - return nil - } - return p.Pusher -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetStatus() string { - if p == nil || p.Status == nil { - return "" - } - return *p.Status -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PagesError) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetTotalPages returns the TotalPages field if it's non-nil, zero value otherwise. -func (p *PageStats) GetTotalPages() int { - if p == nil || p.TotalPages == nil { - return 0 - } - return *p.TotalPages -} - -// GetHook returns the Hook field. -func (p *PingEvent) GetHook() *Hook { - if p == nil { - return nil - } - return p.Hook -} - -// GetHookID returns the HookID field if it's non-nil, zero value otherwise. -func (p *PingEvent) GetHookID() int64 { - if p == nil || p.HookID == nil { - return 0 - } - return *p.HookID -} - -// GetInstallation returns the Installation field. -func (p *PingEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetZen returns the Zen field if it's non-nil, zero value otherwise. 
-func (p *PingEvent) GetZen() string { - if p == nil || p.Zen == nil { - return "" - } - return *p.Zen -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (p *Plan) GetCollaborators() int { - if p == nil || p.Collaborators == nil { - return 0 - } - return *p.Collaborators -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *Plan) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetPrivateRepos returns the PrivateRepos field if it's non-nil, zero value otherwise. -func (p *Plan) GetPrivateRepos() int { - if p == nil || p.PrivateRepos == nil { - return 0 - } - return *p.PrivateRepos -} - -// GetSpace returns the Space field if it's non-nil, zero value otherwise. -func (p *Plan) GetSpace() int { - if p == nil || p.Space == nil { - return 0 - } - return *p.Space -} - -// GetConfigURL returns the ConfigURL field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetConfigURL() string { - if p == nil || p.ConfigURL == nil { - return "" - } - return *p.ConfigURL -} - -// GetEnforcement returns the Enforcement field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetEnforcement() string { - if p == nil || p.Enforcement == nil { - return "" - } - return *p.Enforcement -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetHRef returns the HRef field if it's non-nil, zero value otherwise. -func (p *PRLink) GetHRef() string { - if p == nil || p.HRef == nil { - return "" - } - return *p.HRef -} - -// GetComments returns the Comments field. -func (p *PRLinks) GetComments() *PRLink { - if p == nil { - return nil - } - return p.Comments -} - -// GetCommits returns the Commits field. -func (p *PRLinks) GetCommits() *PRLink { - if p == nil { - return nil - } - return p.Commits -} - -// GetHTML returns the HTML field. -func (p *PRLinks) GetHTML() *PRLink { - if p == nil { - return nil - } - return p.HTML -} - -// GetIssue returns the Issue field. -func (p *PRLinks) GetIssue() *PRLink { - if p == nil { - return nil - } - return p.Issue -} - -// GetReviewComment returns the ReviewComment field. -func (p *PRLinks) GetReviewComment() *PRLink { - if p == nil { - return nil - } - return p.ReviewComment -} - -// GetReviewComments returns the ReviewComments field. -func (p *PRLinks) GetReviewComments() *PRLink { - if p == nil { - return nil - } - return p.ReviewComments -} - -// GetSelf returns the Self field. -func (p *PRLinks) GetSelf() *PRLink { - if p == nil { - return nil - } - return p.Self -} - -// GetStatuses returns the Statuses field. -func (p *PRLinks) GetStatuses() *PRLink { - if p == nil { - return nil - } - return p.Statuses -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *Project) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetColumnsURL returns the ColumnsURL field if it's non-nil, zero value otherwise. -func (p *Project) GetColumnsURL() string { - if p == nil || p.ColumnsURL == nil { - return "" - } - return *p.ColumnsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (p *Project) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetCreator returns the Creator field. -func (p *Project) GetCreator() *User { - if p == nil { - return nil - } - return p.Creator -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Project) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *Project) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *Project) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *Project) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *Project) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetOwnerURL returns the OwnerURL field if it's non-nil, zero value otherwise. -func (p *Project) GetOwnerURL() string { - if p == nil || p.OwnerURL == nil { - return "" - } - return *p.OwnerURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *Project) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *Project) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Project) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetArchived() bool { - if p == nil || p.Archived == nil { - return false - } - return *p.Archived -} - -// GetColumnID returns the ColumnID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetColumnID() int64 { - if p == nil || p.ColumnID == nil { - return 0 - } - return *p.ColumnID -} - -// GetColumnName returns the ColumnName field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetColumnName() string { - if p == nil || p.ColumnName == nil { - return "" - } - return *p.ColumnName -} - -// GetColumnURL returns the ColumnURL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetColumnURL() string { - if p == nil || p.ColumnURL == nil { - return "" - } - return *p.ColumnURL -} - -// GetContentURL returns the ContentURL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetContentURL() string { - if p == nil || p.ContentURL == nil { - return "" - } - return *p.ContentURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetCreator returns the Creator field. -func (p *ProjectCard) GetCreator() *User { - if p == nil { - return nil - } - return p.Creator -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. 
-func (p *ProjectCard) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetNote() string { - if p == nil || p.Note == nil { - return "" - } - return *p.Note -} - -// GetPreviousColumnName returns the PreviousColumnName field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetPreviousColumnName() string { - if p == nil || p.PreviousColumnName == nil { - return "" - } - return *p.PreviousColumnName -} - -// GetProjectID returns the ProjectID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetProjectID() int64 { - if p == nil || p.ProjectID == nil { - return 0 - } - return *p.ProjectID -} - -// GetProjectURL returns the ProjectURL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetProjectURL() string { - if p == nil || p.ProjectURL == nil { - return "" - } - return *p.ProjectURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *ProjectCardEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise. -func (p *ProjectCardEvent) GetAfterID() int64 { - if p == nil || p.AfterID == nil { - return 0 - } - return *p.AfterID -} - -// GetChanges returns the Changes field. -func (p *ProjectCardEvent) GetChanges() *ProjectCardChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectCardEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *ProjectCardEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProjectCard returns the ProjectCard field. -func (p *ProjectCardEvent) GetProjectCard() *ProjectCard { - if p == nil { - return nil - } - return p.ProjectCard -} - -// GetRepo returns the Repo field. -func (p *ProjectCardEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectCardEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetArchivedState returns the ArchivedState field if it's non-nil, zero value otherwise. -func (p *ProjectCardListOptions) GetArchivedState() string { - if p == nil || p.ArchivedState == nil { - return "" - } - return *p.ArchivedState -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (p *ProjectCardOptions) GetArchived() bool { - if p == nil || p.Archived == nil { - return false - } - return *p.Archived -} - -// GetCardsURL returns the CardsURL field if it's non-nil, zero value otherwise. 
-func (p *ProjectColumn) GetCardsURL() string { - if p == nil || p.CardsURL == nil { - return "" - } - return *p.CardsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetProjectURL returns the ProjectURL field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetProjectURL() string { - if p == nil || p.ProjectURL == nil { - return "" - } - return *p.ProjectURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *ProjectColumnEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise. -func (p *ProjectColumnEvent) GetAfterID() int64 { - if p == nil || p.AfterID == nil { - return 0 - } - return *p.AfterID -} - -// GetChanges returns the Changes field. -func (p *ProjectColumnEvent) GetChanges() *ProjectColumnChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectColumnEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *ProjectColumnEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProjectColumn returns the ProjectColumn field. -func (p *ProjectColumnEvent) GetProjectColumn() *ProjectColumn { - if p == nil { - return nil - } - return p.ProjectColumn -} - -// GetRepo returns the Repo field. -func (p *ProjectColumnEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectColumnEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *ProjectEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetChanges returns the Changes field. -func (p *ProjectEvent) GetChanges() *ProjectChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. 
-func (p *ProjectEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProject returns the Project field. -func (p *ProjectEvent) GetProject() *Project { - if p == nil { - return nil - } - return p.Project -} - -// GetRepo returns the Repo field. -func (p *ProjectEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetOrganizationPermission returns the OrganizationPermission field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetOrganizationPermission() string { - if p == nil || p.OrganizationPermission == nil { - return "" - } - return *p.OrganizationPermission -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetPublic() bool { - if p == nil || p.Public == nil { - return false - } - return *p.Public -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetEnforceAdmins returns the EnforceAdmins field. -func (p *Protection) GetEnforceAdmins() *AdminEnforcement { - if p == nil { - return nil - } - return p.EnforceAdmins -} - -// GetRequiredPullRequestReviews returns the RequiredPullRequestReviews field. -func (p *Protection) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcement { - if p == nil { - return nil - } - return p.RequiredPullRequestReviews -} - -// GetRequiredStatusChecks returns the RequiredStatusChecks field. -func (p *Protection) GetRequiredStatusChecks() *RequiredStatusChecks { - if p == nil { - return nil - } - return p.RequiredStatusChecks -} - -// GetRestrictions returns the Restrictions field. -func (p *Protection) GetRestrictions() *BranchRestrictions { - if p == nil { - return nil - } - return p.Restrictions -} - -// GetRequiredPullRequestReviews returns the RequiredPullRequestReviews field. -func (p *ProtectionRequest) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcementRequest { - if p == nil { - return nil - } - return p.RequiredPullRequestReviews -} - -// GetRequiredStatusChecks returns the RequiredStatusChecks field. -func (p *ProtectionRequest) GetRequiredStatusChecks() *RequiredStatusChecks { - if p == nil { - return nil - } - return p.RequiredStatusChecks -} - -// GetRestrictions returns the Restrictions field. -func (p *ProtectionRequest) GetRestrictions() *BranchRestrictionsRequest { - if p == nil { - return nil - } - return p.Restrictions -} - -// GetInstallation returns the Installation field. -func (p *PublicEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetRepo returns the Repo field. -func (p *PublicEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. 
-func (p *PublicEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetActiveLockReason returns the ActiveLockReason field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetActiveLockReason() string { - if p == nil || p.ActiveLockReason == nil { - return "" - } - return *p.ActiveLockReason -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetAdditions() int { - if p == nil || p.Additions == nil { - return 0 - } - return *p.Additions -} - -// GetAssignee returns the Assignee field. -func (p *PullRequest) GetAssignee() *User { - if p == nil { - return nil - } - return p.Assignee -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetAuthorAssociation() string { - if p == nil || p.AuthorAssociation == nil { - return "" - } - return *p.AuthorAssociation -} - -// GetBase returns the Base field. -func (p *PullRequest) GetBase() *PullRequestBranch { - if p == nil { - return nil - } - return p.Base -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetChangedFiles returns the ChangedFiles field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetChangedFiles() int { - if p == nil || p.ChangedFiles == nil { - return 0 - } - return *p.ChangedFiles -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetClosedAt() time.Time { - if p == nil || p.ClosedAt == nil { - return time.Time{} - } - return *p.ClosedAt -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetComments() int { - if p == nil || p.Comments == nil { - return 0 - } - return *p.Comments -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommentsURL() string { - if p == nil || p.CommentsURL == nil { - return "" - } - return *p.CommentsURL -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommits() int { - if p == nil || p.Commits == nil { - return 0 - } - return *p.Commits -} - -// GetCommitsURL returns the CommitsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommitsURL() string { - if p == nil || p.CommitsURL == nil { - return "" - } - return *p.CommitsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCreatedAt() time.Time { - if p == nil || p.CreatedAt == nil { - return time.Time{} - } - return *p.CreatedAt -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetDeletions() int { - if p == nil || p.Deletions == nil { - return 0 - } - return *p.Deletions -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetDiffURL() string { - if p == nil || p.DiffURL == nil { - return "" - } - return *p.DiffURL -} - -// GetHead returns the Head field. -func (p *PullRequest) GetHead() *PullRequestBranch { - if p == nil { - return nil - } - return p.Head -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (p *PullRequest) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetIssueURL() string { - if p == nil || p.IssueURL == nil { - return "" - } - return *p.IssueURL -} - -// GetLinks returns the Links field. -func (p *PullRequest) GetLinks() *PRLinks { - if p == nil { - return nil - } - return p.Links -} - -// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMaintainerCanModify() bool { - if p == nil || p.MaintainerCanModify == nil { - return false - } - return *p.MaintainerCanModify -} - -// GetMergeable returns the Mergeable field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeable() bool { - if p == nil || p.Mergeable == nil { - return false - } - return *p.Mergeable -} - -// GetMergeableState returns the MergeableState field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeableState() string { - if p == nil || p.MergeableState == nil { - return "" - } - return *p.MergeableState -} - -// GetMergeCommitSHA returns the MergeCommitSHA field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeCommitSHA() string { - if p == nil || p.MergeCommitSHA == nil { - return "" - } - return *p.MergeCommitSHA -} - -// GetMerged returns the Merged field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMerged() bool { - if p == nil || p.Merged == nil { - return false - } - return *p.Merged -} - -// GetMergedAt returns the MergedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergedAt() time.Time { - if p == nil || p.MergedAt == nil { - return time.Time{} - } - return *p.MergedAt -} - -// GetMergedBy returns the MergedBy field. -func (p *PullRequest) GetMergedBy() *User { - if p == nil { - return nil - } - return p.MergedBy -} - -// GetMilestone returns the Milestone field. -func (p *PullRequest) GetMilestone() *Milestone { - if p == nil { - return nil - } - return p.Milestone -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetPatchURL() string { - if p == nil || p.PatchURL == nil { - return "" - } - return *p.PatchURL -} - -// GetReviewCommentsURL returns the ReviewCommentsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetReviewCommentsURL() string { - if p == nil || p.ReviewCommentsURL == nil { - return "" - } - return *p.ReviewCommentsURL -} - -// GetReviewCommentURL returns the ReviewCommentURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetReviewCommentURL() string { - if p == nil || p.ReviewCommentURL == nil { - return "" - } - return *p.ReviewCommentURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
-func (p *PullRequest) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetStatusesURL() string { - if p == nil || p.StatusesURL == nil { - return "" - } - return *p.StatusesURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetTitle() string { - if p == nil || p.Title == nil { - return "" - } - return *p.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetUpdatedAt() time.Time { - if p == nil || p.UpdatedAt == nil { - return time.Time{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetUser returns the User field. -func (p *PullRequest) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetLabel returns the Label field if it's non-nil, zero value otherwise. -func (p *PullRequestBranch) GetLabel() string { - if p == nil || p.Label == nil { - return "" - } - return *p.Label -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (p *PullRequestBranch) GetRef() string { - if p == nil || p.Ref == nil { - return "" - } - return *p.Ref -} - -// GetRepo returns the Repo field. -func (p *PullRequestBranch) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *PullRequestBranch) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetUser returns the User field. -func (p *PullRequestBranch) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetAuthorAssociation() string { - if p == nil || p.AuthorAssociation == nil { - return "" - } - return *p.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetCreatedAt() time.Time { - if p == nil || p.CreatedAt == nil { - return time.Time{} - } - return *p.CreatedAt -} - -// GetDiffHunk returns the DiffHunk field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetDiffHunk() string { - if p == nil || p.DiffHunk == nil { - return "" - } - return *p.DiffHunk -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetInReplyTo returns the InReplyTo field if it's non-nil, zero value otherwise. 
-func (p *PullRequestComment) GetInReplyTo() int64 { - if p == nil || p.InReplyTo == nil { - return 0 - } - return *p.InReplyTo -} - -// GetOriginalCommitID returns the OriginalCommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalCommitID() string { - if p == nil || p.OriginalCommitID == nil { - return "" - } - return *p.OriginalCommitID -} - -// GetOriginalPosition returns the OriginalPosition field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalPosition() int { - if p == nil || p.OriginalPosition == nil { - return 0 - } - return *p.OriginalPosition -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPath() string { - if p == nil || p.Path == nil { - return "" - } - return *p.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPosition() int { - if p == nil || p.Position == nil { - return 0 - } - return *p.Position -} - -// GetPullRequestReviewID returns the PullRequestReviewID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPullRequestReviewID() int64 { - if p == nil || p.PullRequestReviewID == nil { - return 0 - } - return *p.PullRequestReviewID -} - -// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPullRequestURL() string { - if p == nil || p.PullRequestURL == nil { - return "" - } - return *p.PullRequestURL -} - -// GetReactions returns the Reactions field. -func (p *PullRequestComment) GetReactions() *Reactions { - if p == nil { - return nil - } - return p.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetUpdatedAt() time.Time { - if p == nil || p.UpdatedAt == nil { - return time.Time{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetUser returns the User field. -func (p *PullRequestComment) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAssignee returns the Assignee field. -func (p *PullRequestEvent) GetAssignee() *User { - if p == nil { - return nil - } - return p.Assignee -} - -// GetChanges returns the Changes field. -func (p *PullRequestEvent) GetChanges() *EditChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *PullRequestEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetLabel returns the Label field. -func (p *PullRequestEvent) GetLabel() *Label { - if p == nil { - return nil - } - return p.Label -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *PullRequestEvent) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetOrganization returns the Organization field. -func (p *PullRequestEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPullRequest returns the PullRequest field. 
-func (p *PullRequestEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetRequestedReviewer returns the RequestedReviewer field. -func (p *PullRequestEvent) GetRequestedReviewer() *User { - if p == nil { - return nil - } - return p.RequestedReviewer -} - -// GetSender returns the Sender field. -func (p *PullRequestEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetDiffURL() string { - if p == nil || p.DiffURL == nil { - return "" - } - return *p.DiffURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetPatchURL() string { - if p == nil || p.PatchURL == nil { - return "" - } - return *p.PatchURL -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetMerged returns the Merged field if it's non-nil, zero value otherwise. -func (p *PullRequestMergeResult) GetMerged() bool { - if p == nil || p.Merged == nil { - return false - } - return *p.Merged -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PullRequestMergeResult) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *PullRequestMergeResult) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetPullRequestURL() string { - if p == nil || p.PullRequestURL == nil { - return "" - } - return *p.PullRequestURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetSubmittedAt() time.Time { - if p == nil || p.SubmittedAt == nil { - return time.Time{} - } - return *p.SubmittedAt -} - -// GetUser returns the User field. 
-func (p *PullRequestReview) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewCommentEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetChanges returns the Changes field. -func (p *PullRequestReviewCommentEvent) GetChanges() *EditChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetComment returns the Comment field. -func (p *PullRequestReviewCommentEvent) GetComment() *PullRequestComment { - if p == nil { - return nil - } - return p.Comment -} - -// GetInstallation returns the Installation field. -func (p *PullRequestReviewCommentEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestReviewCommentEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestReviewCommentEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PullRequestReviewCommentEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewDismissalRequest) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetInstallation returns the Installation field. -func (p *PullRequestReviewEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrganization returns the Organization field. -func (p *PullRequestReviewEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestReviewEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestReviewEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetReview returns the Review field. -func (p *PullRequestReviewEvent) GetReview() *PullRequestReview { - if p == nil { - return nil - } - return p.Review -} - -// GetSender returns the Sender field. -func (p *PullRequestReviewEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetEvent() string { - if p == nil || p.Event == nil { - return "" - } - return *p.Event -} - -// GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. 
-func (p *PullRequestReviewsEnforcementRequest) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { - if p == nil { - return nil - } - return p.DismissalRestrictionsRequest -} - -// GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. -func (p *PullRequestReviewsEnforcementUpdate) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { - if p == nil { - return nil - } - return p.DismissalRestrictionsRequest -} - -// GetDismissStaleReviews returns the DismissStaleReviews field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementUpdate) GetDismissStaleReviews() bool { - if p == nil || p.DismissStaleReviews == nil { - return false - } - return *p.DismissStaleReviews -} - -// GetMergablePulls returns the MergablePulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetMergablePulls() int { - if p == nil || p.MergablePulls == nil { - return 0 - } - return *p.MergablePulls -} - -// GetMergedPulls returns the MergedPulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetMergedPulls() int { - if p == nil || p.MergedPulls == nil { - return 0 - } - return *p.MergedPulls -} - -// GetTotalPulls returns the TotalPulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetTotalPulls() int { - if p == nil || p.TotalPulls == nil { - return 0 - } - return *p.TotalPulls -} - -// GetUnmergablePulls returns the UnmergablePulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetUnmergablePulls() int { - if p == nil || p.UnmergablePulls == nil { - return 0 - } - return *p.UnmergablePulls -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. -func (p *PunchCard) GetCommits() int { - if p == nil || p.Commits == nil { - return 0 - } - return *p.Commits -} - -// GetDay returns the Day field if it's non-nil, zero value otherwise. -func (p *PunchCard) GetDay() int { - if p == nil || p.Day == nil { - return 0 - } - return *p.Day -} - -// GetHour returns the Hour field if it's non-nil, zero value otherwise. -func (p *PunchCard) GetHour() int { - if p == nil || p.Hour == nil { - return 0 - } - return *p.Hour -} - -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetAfter() string { - if p == nil || p.After == nil { - return "" - } - return *p.After -} - -// GetBaseRef returns the BaseRef field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetBaseRef() string { - if p == nil || p.BaseRef == nil { - return "" - } - return *p.BaseRef -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetBefore() string { - if p == nil || p.Before == nil { - return "" - } - return *p.Before -} - -// GetCompare returns the Compare field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetCompare() string { - if p == nil || p.Compare == nil { - return "" - } - return *p.Compare -} - -// GetCreated returns the Created field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetCreated() bool { - if p == nil || p.Created == nil { - return false - } - return *p.Created -} - -// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetDeleted() bool { - if p == nil || p.Deleted == nil { - return false - } - return *p.Deleted -} - -// GetDistinctSize returns the DistinctSize field if it's non-nil, zero value otherwise. 
-func (p *PushEvent) GetDistinctSize() int { - if p == nil || p.DistinctSize == nil { - return 0 - } - return *p.DistinctSize -} - -// GetForced returns the Forced field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetForced() bool { - if p == nil || p.Forced == nil { - return false - } - return *p.Forced -} - -// GetHead returns the Head field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetHead() string { - if p == nil || p.Head == nil { - return "" - } - return *p.Head -} - -// GetHeadCommit returns the HeadCommit field. -func (p *PushEvent) GetHeadCommit() *PushEventCommit { - if p == nil { - return nil - } - return p.HeadCommit -} - -// GetInstallation returns the Installation field. -func (p *PushEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetPusher returns the Pusher field. -func (p *PushEvent) GetPusher() *User { - if p == nil { - return nil - } - return p.Pusher -} - -// GetPushID returns the PushID field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetPushID() int64 { - if p == nil || p.PushID == nil { - return 0 - } - return *p.PushID -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetRef() string { - if p == nil || p.Ref == nil { - return "" - } - return *p.Ref -} - -// GetRepo returns the Repo field. -func (p *PushEvent) GetRepo() *PushEventRepository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PushEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetSize() int { - if p == nil || p.Size == nil { - return 0 - } - return *p.Size -} - -// GetAuthor returns the Author field. -func (p *PushEventCommit) GetAuthor() *CommitAuthor { - if p == nil { - return nil - } - return p.Author -} - -// GetCommitter returns the Committer field. -func (p *PushEventCommit) GetCommitter() *CommitAuthor { - if p == nil { - return nil - } - return p.Committer -} - -// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetDistinct() bool { - if p == nil || p.Distinct == nil { - return false - } - return *p.Distinct -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetID() string { - if p == nil || p.ID == nil { - return "" - } - return *p.ID -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetTimestamp() Timestamp { - if p == nil || p.Timestamp == nil { - return Timestamp{} - } - return *p.Timestamp -} - -// GetTreeID returns the TreeID field if it's non-nil, zero value otherwise. -func (p *PushEventCommit) GetTreeID() string { - if p == nil || p.TreeID == nil { - return "" - } - return *p.TreeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (p *PushEventCommit) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (p *PushEventRepoOwner) GetEmail() string { - if p == nil || p.Email == nil { - return "" - } - return *p.Email -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PushEventRepoOwner) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetArchiveURL() string { - if p == nil || p.ArchiveURL == nil { - return "" - } - return *p.ArchiveURL -} - -// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetCloneURL() string { - if p == nil || p.CloneURL == nil { - return "" - } - return *p.CloneURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetDefaultBranch() string { - if p == nil || p.DefaultBranch == nil { - return "" - } - return *p.DefaultBranch -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetDescription() string { - if p == nil || p.Description == nil { - return "" - } - return *p.Description -} - -// GetFork returns the Fork field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetFork() bool { - if p == nil || p.Fork == nil { - return false - } - return *p.Fork -} - -// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetForksCount() int { - if p == nil || p.ForksCount == nil { - return 0 - } - return *p.ForksCount -} - -// GetFullName returns the FullName field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetFullName() string { - if p == nil || p.FullName == nil { - return "" - } - return *p.FullName -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetGitURL() string { - if p == nil || p.GitURL == nil { - return "" - } - return *p.GitURL -} - -// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasDownloads() bool { - if p == nil || p.HasDownloads == nil { - return false - } - return *p.HasDownloads -} - -// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasIssues() bool { - if p == nil || p.HasIssues == nil { - return false - } - return *p.HasIssues -} - -// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasPages() bool { - if p == nil || p.HasPages == nil { - return false - } - return *p.HasPages -} - -// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasWiki() bool { - if p == nil || p.HasWiki == nil { - return false - } - return *p.HasWiki -} - -// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise. 
-func (p *PushEventRepository) GetHomepage() string { - if p == nil || p.Homepage == nil { - return "" - } - return *p.Homepage -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetLanguage() string { - if p == nil || p.Language == nil { - return "" - } - return *p.Language -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetMasterBranch() string { - if p == nil || p.MasterBranch == nil { - return "" - } - return *p.MasterBranch -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetOpenIssuesCount() int { - if p == nil || p.OpenIssuesCount == nil { - return 0 - } - return *p.OpenIssuesCount -} - -// GetOrganization returns the Organization field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetOrganization() string { - if p == nil || p.Organization == nil { - return "" - } - return *p.Organization -} - -// GetOwner returns the Owner field. -func (p *PushEventRepository) GetOwner() *User { - if p == nil { - return nil - } - return p.Owner -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetPrivate() bool { - if p == nil || p.Private == nil { - return false - } - return *p.Private -} - -// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetPushedAt() Timestamp { - if p == nil || p.PushedAt == nil { - return Timestamp{} - } - return *p.PushedAt -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetSize() int { - if p == nil || p.Size == nil { - return 0 - } - return *p.Size -} - -// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetSSHURL() string { - if p == nil || p.SSHURL == nil { - return "" - } - return *p.SSHURL -} - -// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetStargazersCount() int { - if p == nil || p.StargazersCount == nil { - return 0 - } - return *p.StargazersCount -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetStatusesURL() string { - if p == nil || p.StatusesURL == nil { - return "" - } - return *p.StatusesURL -} - -// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise. 
-func (p *PushEventRepository) GetSVNURL() string { - if p == nil || p.SVNURL == nil { - return "" - } - return *p.SVNURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetWatchersCount() int { - if p == nil || p.WatchersCount == nil { - return 0 - } - return *p.WatchersCount -} - -// GetCore returns the Core field. -func (r *RateLimits) GetCore() *Rate { - if r == nil { - return nil - } - return r.Core -} - -// GetSearch returns the Search field. -func (r *RateLimits) GetSearch() *Rate { - if r == nil { - return nil - } - return r.Search -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (r *Reaction) GetContent() string { - if r == nil || r.Content == nil { - return "" - } - return *r.Content -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *Reaction) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Reaction) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetUser returns the User field. -func (r *Reaction) GetUser() *User { - if r == nil { - return nil - } - return r.User -} - -// GetConfused returns the Confused field if it's non-nil, zero value otherwise. -func (r *Reactions) GetConfused() int { - if r == nil || r.Confused == nil { - return 0 - } - return *r.Confused -} - -// GetHeart returns the Heart field if it's non-nil, zero value otherwise. -func (r *Reactions) GetHeart() int { - if r == nil || r.Heart == nil { - return 0 - } - return *r.Heart -} - -// GetHooray returns the Hooray field if it's non-nil, zero value otherwise. -func (r *Reactions) GetHooray() int { - if r == nil || r.Hooray == nil { - return 0 - } - return *r.Hooray -} - -// GetLaugh returns the Laugh field if it's non-nil, zero value otherwise. -func (r *Reactions) GetLaugh() int { - if r == nil || r.Laugh == nil { - return 0 - } - return *r.Laugh -} - -// GetMinusOne returns the MinusOne field if it's non-nil, zero value otherwise. -func (r *Reactions) GetMinusOne() int { - if r == nil || r.MinusOne == nil { - return 0 - } - return *r.MinusOne -} - -// GetPlusOne returns the PlusOne field if it's non-nil, zero value otherwise. -func (r *Reactions) GetPlusOne() int { - if r == nil || r.PlusOne == nil { - return 0 - } - return *r.PlusOne -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (r *Reactions) GetTotalCount() int { - if r == nil || r.TotalCount == nil { - return 0 - } - return *r.TotalCount -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Reactions) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Reference) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetObject returns the Object field. 
-func (r *Reference) GetObject() *GitObject { - if r == nil { - return nil - } - return r.Object -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (r *Reference) GetRef() string { - if r == nil || r.Ref == nil { - return "" - } - return *r.Ref -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Reference) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetBrowserDownloadURL returns the BrowserDownloadURL field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetBrowserDownloadURL() string { - if r == nil || r.BrowserDownloadURL == nil { - return "" - } - return *r.BrowserDownloadURL -} - -// GetContentType returns the ContentType field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetContentType() string { - if r == nil || r.ContentType == nil { - return "" - } - return *r.ContentType -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetDownloadCount returns the DownloadCount field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetDownloadCount() int { - if r == nil || r.DownloadCount == nil { - return 0 - } - return *r.DownloadCount -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetLabel returns the Label field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetLabel() string { - if r == nil || r.Label == nil { - return "" - } - return *r.Label -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetState() string { - if r == nil || r.State == nil { - return "" - } - return *r.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetUploader returns the Uploader field. -func (r *ReleaseAsset) GetUploader() *User { - if r == nil { - return nil - } - return r.Uploader -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (r *ReleaseEvent) GetAction() string { - if r == nil || r.Action == nil { - return "" - } - return *r.Action -} - -// GetInstallation returns the Installation field. -func (r *ReleaseEvent) GetInstallation() *Installation { - if r == nil { - return nil - } - return r.Installation -} - -// GetRelease returns the Release field. 
-func (r *ReleaseEvent) GetRelease() *RepositoryRelease { - if r == nil { - return nil - } - return r.Release -} - -// GetRepo returns the Repo field. -func (r *ReleaseEvent) GetRepo() *Repository { - if r == nil { - return nil - } - return r.Repo -} - -// GetSender returns the Sender field. -func (r *ReleaseEvent) GetSender() *User { - if r == nil { - return nil - } - return r.Sender -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (r *Rename) GetFrom() string { - if r == nil || r.From == nil { - return "" - } - return *r.From -} - -// GetTo returns the To field if it's non-nil, zero value otherwise. -func (r *Rename) GetTo() string { - if r == nil || r.To == nil { - return "" - } - return *r.To -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (r *RepositoriesSearchResult) GetIncompleteResults() bool { - if r == nil || r.IncompleteResults == nil { - return false - } - return *r.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (r *RepositoriesSearchResult) GetTotal() int { - if r == nil || r.Total == nil { - return 0 - } - return *r.Total -} - -// GetAllowMergeCommit returns the AllowMergeCommit field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowMergeCommit() bool { - if r == nil || r.AllowMergeCommit == nil { - return false - } - return *r.AllowMergeCommit -} - -// GetAllowRebaseMerge returns the AllowRebaseMerge field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowRebaseMerge() bool { - if r == nil || r.AllowRebaseMerge == nil { - return false - } - return *r.AllowRebaseMerge -} - -// GetAllowSquashMerge returns the AllowSquashMerge field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowSquashMerge() bool { - if r == nil || r.AllowSquashMerge == nil { - return false - } - return *r.AllowSquashMerge -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (r *Repository) GetArchived() bool { - if r == nil || r.Archived == nil { - return false - } - return *r.Archived -} - -// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetArchiveURL() string { - if r == nil || r.ArchiveURL == nil { - return "" - } - return *r.ArchiveURL -} - -// GetAssigneesURL returns the AssigneesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetAssigneesURL() string { - if r == nil || r.AssigneesURL == nil { - return "" - } - return *r.AssigneesURL -} - -// GetAutoInit returns the AutoInit field if it's non-nil, zero value otherwise. -func (r *Repository) GetAutoInit() bool { - if r == nil || r.AutoInit == nil { - return false - } - return *r.AutoInit -} - -// GetBlobsURL returns the BlobsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetBlobsURL() string { - if r == nil || r.BlobsURL == nil { - return "" - } - return *r.BlobsURL -} - -// GetBranchesURL returns the BranchesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetBranchesURL() string { - if r == nil || r.BranchesURL == nil { - return "" - } - return *r.BranchesURL -} - -// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCloneURL() string { - if r == nil || r.CloneURL == nil { - return "" - } - return *r.CloneURL -} - -// GetCodeOfConduct returns the CodeOfConduct field. 
-func (r *Repository) GetCodeOfConduct() *CodeOfConduct { - if r == nil { - return nil - } - return r.CodeOfConduct -} - -// GetCollaboratorsURL returns the CollaboratorsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCollaboratorsURL() string { - if r == nil || r.CollaboratorsURL == nil { - return "" - } - return *r.CollaboratorsURL -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCommentsURL() string { - if r == nil || r.CommentsURL == nil { - return "" - } - return *r.CommentsURL -} - -// GetCommitsURL returns the CommitsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCommitsURL() string { - if r == nil || r.CommitsURL == nil { - return "" - } - return *r.CommitsURL -} - -// GetCompareURL returns the CompareURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCompareURL() string { - if r == nil || r.CompareURL == nil { - return "" - } - return *r.CompareURL -} - -// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetContentsURL() string { - if r == nil || r.ContentsURL == nil { - return "" - } - return *r.ContentsURL -} - -// GetContributorsURL returns the ContributorsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetContributorsURL() string { - if r == nil || r.ContributorsURL == nil { - return "" - } - return *r.ContributorsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. -func (r *Repository) GetDefaultBranch() string { - if r == nil || r.DefaultBranch == nil { - return "" - } - return *r.DefaultBranch -} - -// GetDeploymentsURL returns the DeploymentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetDeploymentsURL() string { - if r == nil || r.DeploymentsURL == nil { - return "" - } - return *r.DeploymentsURL -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (r *Repository) GetDescription() string { - if r == nil || r.Description == nil { - return "" - } - return *r.Description -} - -// GetDownloadsURL returns the DownloadsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetDownloadsURL() string { - if r == nil || r.DownloadsURL == nil { - return "" - } - return *r.DownloadsURL -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetEventsURL() string { - if r == nil || r.EventsURL == nil { - return "" - } - return *r.EventsURL -} - -// GetFork returns the Fork field if it's non-nil, zero value otherwise. -func (r *Repository) GetFork() bool { - if r == nil || r.Fork == nil { - return false - } - return *r.Fork -} - -// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetForksCount() int { - if r == nil || r.ForksCount == nil { - return 0 - } - return *r.ForksCount -} - -// GetForksURL returns the ForksURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetForksURL() string { - if r == nil || r.ForksURL == nil { - return "" - } - return *r.ForksURL -} - -// GetFullName returns the FullName field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetFullName() string { - if r == nil || r.FullName == nil { - return "" - } - return *r.FullName -} - -// GetGitCommitsURL returns the GitCommitsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitCommitsURL() string { - if r == nil || r.GitCommitsURL == nil { - return "" - } - return *r.GitCommitsURL -} - -// GetGitignoreTemplate returns the GitignoreTemplate field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitignoreTemplate() string { - if r == nil || r.GitignoreTemplate == nil { - return "" - } - return *r.GitignoreTemplate -} - -// GetGitRefsURL returns the GitRefsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitRefsURL() string { - if r == nil || r.GitRefsURL == nil { - return "" - } - return *r.GitRefsURL -} - -// GetGitTagsURL returns the GitTagsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitTagsURL() string { - if r == nil || r.GitTagsURL == nil { - return "" - } - return *r.GitTagsURL -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitURL() string { - if r == nil || r.GitURL == nil { - return "" - } - return *r.GitURL -} - -// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasDownloads() bool { - if r == nil || r.HasDownloads == nil { - return false - } - return *r.HasDownloads -} - -// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasIssues() bool { - if r == nil || r.HasIssues == nil { - return false - } - return *r.HasIssues -} - -// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasPages() bool { - if r == nil || r.HasPages == nil { - return false - } - return *r.HasPages -} - -// GetHasProjects returns the HasProjects field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasProjects() bool { - if r == nil || r.HasProjects == nil { - return false - } - return *r.HasProjects -} - -// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasWiki() bool { - if r == nil || r.HasWiki == nil { - return false - } - return *r.HasWiki -} - -// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise. -func (r *Repository) GetHomepage() string { - if r == nil || r.Homepage == nil { - return "" - } - return *r.Homepage -} - -// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetHooksURL() string { - if r == nil || r.HooksURL == nil { - return "" - } - return *r.HooksURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *Repository) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetIssueCommentURL returns the IssueCommentURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetIssueCommentURL() string { - if r == nil || r.IssueCommentURL == nil { - return "" - } - return *r.IssueCommentURL -} - -// GetIssueEventsURL returns the IssueEventsURL field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetIssueEventsURL() string { - if r == nil || r.IssueEventsURL == nil { - return "" - } - return *r.IssueEventsURL -} - -// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetIssuesURL() string { - if r == nil || r.IssuesURL == nil { - return "" - } - return *r.IssuesURL -} - -// GetKeysURL returns the KeysURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetKeysURL() string { - if r == nil || r.KeysURL == nil { - return "" - } - return *r.KeysURL -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetLabelsURL() string { - if r == nil || r.LabelsURL == nil { - return "" - } - return *r.LabelsURL -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (r *Repository) GetLanguage() string { - if r == nil || r.Language == nil { - return "" - } - return *r.Language -} - -// GetLanguagesURL returns the LanguagesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetLanguagesURL() string { - if r == nil || r.LanguagesURL == nil { - return "" - } - return *r.LanguagesURL -} - -// GetLicense returns the License field. -func (r *Repository) GetLicense() *License { - if r == nil { - return nil - } - return r.License -} - -// GetLicenseTemplate returns the LicenseTemplate field if it's non-nil, zero value otherwise. -func (r *Repository) GetLicenseTemplate() string { - if r == nil || r.LicenseTemplate == nil { - return "" - } - return *r.LicenseTemplate -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (r *Repository) GetMasterBranch() string { - if r == nil || r.MasterBranch == nil { - return "" - } - return *r.MasterBranch -} - -// GetMergesURL returns the MergesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMergesURL() string { - if r == nil || r.MergesURL == nil { - return "" - } - return *r.MergesURL -} - -// GetMilestonesURL returns the MilestonesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMilestonesURL() string { - if r == nil || r.MilestonesURL == nil { - return "" - } - return *r.MilestonesURL -} - -// GetMirrorURL returns the MirrorURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMirrorURL() string { - if r == nil || r.MirrorURL == nil { - return "" - } - return *r.MirrorURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *Repository) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNetworkCount returns the NetworkCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetNetworkCount() int { - if r == nil || r.NetworkCount == nil { - return 0 - } - return *r.NetworkCount -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Repository) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetNotificationsURL returns the NotificationsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetNotificationsURL() string { - if r == nil || r.NotificationsURL == nil { - return "" - } - return *r.NotificationsURL -} - -// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetOpenIssuesCount() int { - if r == nil || r.OpenIssuesCount == nil { - return 0 - } - return *r.OpenIssuesCount -} - -// GetOrganization returns the Organization field. -func (r *Repository) GetOrganization() *Organization { - if r == nil { - return nil - } - return r.Organization -} - -// GetOwner returns the Owner field. -func (r *Repository) GetOwner() *User { - if r == nil { - return nil - } - return r.Owner -} - -// GetParent returns the Parent field. -func (r *Repository) GetParent() *Repository { - if r == nil { - return nil - } - return r.Parent -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. -func (r *Repository) GetPermissions() map[string]bool { - if r == nil || r.Permissions == nil { - return map[string]bool{} - } - return *r.Permissions -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (r *Repository) GetPrivate() bool { - if r == nil || r.Private == nil { - return false - } - return *r.Private -} - -// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetPullsURL() string { - if r == nil || r.PullsURL == nil { - return "" - } - return *r.PullsURL -} - -// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetPushedAt() Timestamp { - if r == nil || r.PushedAt == nil { - return Timestamp{} - } - return *r.PushedAt -} - -// GetReleasesURL returns the ReleasesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetReleasesURL() string { - if r == nil || r.ReleasesURL == nil { - return "" - } - return *r.ReleasesURL -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (r *Repository) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetSource returns the Source field. -func (r *Repository) GetSource() *Repository { - if r == nil { - return nil - } - return r.Source -} - -// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSSHURL() string { - if r == nil || r.SSHURL == nil { - return "" - } - return *r.SSHURL -} - -// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetStargazersCount() int { - if r == nil || r.StargazersCount == nil { - return 0 - } - return *r.StargazersCount -} - -// GetStargazersURL returns the StargazersURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetStargazersURL() string { - if r == nil || r.StargazersURL == nil { - return "" - } - return *r.StargazersURL -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetStatusesURL() string { - if r == nil || r.StatusesURL == nil { - return "" - } - return *r.StatusesURL -} - -// GetSubscribersCount returns the SubscribersCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetSubscribersCount() int { - if r == nil || r.SubscribersCount == nil { - return 0 - } - return *r.SubscribersCount -} - -// GetSubscribersURL returns the SubscribersURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSubscribersURL() string { - if r == nil || r.SubscribersURL == nil { - return "" - } - return *r.SubscribersURL -} - -// GetSubscriptionURL returns the SubscriptionURL field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetSubscriptionURL() string { - if r == nil || r.SubscriptionURL == nil { - return "" - } - return *r.SubscriptionURL -} - -// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSVNURL() string { - if r == nil || r.SVNURL == nil { - return "" - } - return *r.SVNURL -} - -// GetTagsURL returns the TagsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTagsURL() string { - if r == nil || r.TagsURL == nil { - return "" - } - return *r.TagsURL -} - -// GetTeamID returns the TeamID field if it's non-nil, zero value otherwise. -func (r *Repository) GetTeamID() int64 { - if r == nil || r.TeamID == nil { - return 0 - } - return *r.TeamID -} - -// GetTeamsURL returns the TeamsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTeamsURL() string { - if r == nil || r.TeamsURL == nil { - return "" - } - return *r.TeamsURL -} - -// GetTreesURL returns the TreesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTreesURL() string { - if r == nil || r.TreesURL == nil { - return "" - } - return *r.TreesURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Repository) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetWatchersCount() int { - if r == nil || r.WatchersCount == nil { - return 0 - } - return *r.WatchersCount -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetBody() string { - if r == nil || r.Body == nil { - return "" - } - return *r.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetCommitID() string { - if r == nil || r.CommitID == nil { - return "" - } - return *r.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetCreatedAt() time.Time { - if r == nil || r.CreatedAt == nil { - return time.Time{} - } - return *r.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetPath() string { - if r == nil || r.Path == nil { - return "" - } - return *r.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetPosition() int { - if r == nil || r.Position == nil { - return 0 - } - return *r.Position -} - -// GetReactions returns the Reactions field. -func (r *RepositoryComment) GetReactions() *Reactions { - if r == nil { - return nil - } - return r.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. 
-func (r *RepositoryComment) GetUpdatedAt() time.Time { - if r == nil || r.UpdatedAt == nil { - return time.Time{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetUser returns the User field. -func (r *RepositoryComment) GetUser() *User { - if r == nil { - return nil - } - return r.User -} - -// GetAuthor returns the Author field. -func (r *RepositoryCommit) GetAuthor() *User { - if r == nil { - return nil - } - return r.Author -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (r *RepositoryCommit) GetCommentsURL() string { - if r == nil || r.CommentsURL == nil { - return "" - } - return *r.CommentsURL -} - -// GetCommit returns the Commit field. -func (r *RepositoryCommit) GetCommit() *Commit { - if r == nil { - return nil - } - return r.Commit -} - -// GetCommitter returns the Committer field. -func (r *RepositoryCommit) GetCommitter() *User { - if r == nil { - return nil - } - return r.Committer -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryCommit) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (r *RepositoryCommit) GetSHA() string { - if r == nil || r.SHA == nil { - return "" - } - return *r.SHA -} - -// GetStats returns the Stats field. -func (r *RepositoryCommit) GetStats() *CommitStats { - if r == nil { - return nil - } - return r.Stats -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryCommit) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetDownloadURL() string { - if r == nil || r.DownloadURL == nil { - return "" - } - return *r.DownloadURL -} - -// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetEncoding() string { - if r == nil || r.Encoding == nil { - return "" - } - return *r.Encoding -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetGitURL() string { - if r == nil || r.GitURL == nil { - return "" - } - return *r.GitURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetPath() string { - if r == nil || r.Path == nil { - return "" - } - return *r.Path -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetSHA() string { - if r == nil || r.SHA == nil { - return "" - } - return *r.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. 
-func (r *RepositoryContent) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetType() string { - if r == nil || r.Type == nil { - return "" - } - return *r.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryContent) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetAuthor returns the Author field. -func (r *RepositoryContentFileOptions) GetAuthor() *CommitAuthor { - if r == nil { - return nil - } - return r.Author -} - -// GetBranch returns the Branch field if it's non-nil, zero value otherwise. -func (r *RepositoryContentFileOptions) GetBranch() string { - if r == nil || r.Branch == nil { - return "" - } - return *r.Branch -} - -// GetCommitter returns the Committer field. -func (r *RepositoryContentFileOptions) GetCommitter() *CommitAuthor { - if r == nil { - return nil - } - return r.Committer -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (r *RepositoryContentFileOptions) GetMessage() string { - if r == nil || r.Message == nil { - return "" - } - return *r.Message -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (r *RepositoryContentFileOptions) GetSHA() string { - if r == nil || r.SHA == nil { - return "" - } - return *r.SHA -} - -// GetContent returns the Content field. -func (r *RepositoryContentResponse) GetContent() *RepositoryContent { - if r == nil { - return nil - } - return r.Content -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (r *RepositoryEvent) GetAction() string { - if r == nil || r.Action == nil { - return "" - } - return *r.Action -} - -// GetInstallation returns the Installation field. -func (r *RepositoryEvent) GetInstallation() *Installation { - if r == nil { - return nil - } - return r.Installation -} - -// GetOrg returns the Org field. -func (r *RepositoryEvent) GetOrg() *Organization { - if r == nil { - return nil - } - return r.Org -} - -// GetRepo returns the Repo field. -func (r *RepositoryEvent) GetRepo() *Repository { - if r == nil { - return nil - } - return r.Repo -} - -// GetSender returns the Sender field. -func (r *RepositoryEvent) GetSender() *User { - if r == nil { - return nil - } - return r.Sender -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryInvitation) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryInvitation) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepositoryInvitation) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetInvitee returns the Invitee field. -func (r *RepositoryInvitation) GetInvitee() *User { - if r == nil { - return nil - } - return r.Invitee -} - -// GetInviter returns the Inviter field. -func (r *RepositoryInvitation) GetInviter() *User { - if r == nil { - return nil - } - return r.Inviter -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. 
-func (r *RepositoryInvitation) GetPermissions() string { - if r == nil || r.Permissions == nil { - return "" - } - return *r.Permissions -} - -// GetRepo returns the Repo field. -func (r *RepositoryInvitation) GetRepo() *Repository { - if r == nil { - return nil - } - return r.Repo -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryInvitation) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetContent() string { - if r == nil || r.Content == nil { - return "" - } - return *r.Content -} - -// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetDownloadURL() string { - if r == nil || r.DownloadURL == nil { - return "" - } - return *r.DownloadURL -} - -// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetEncoding() string { - if r == nil || r.Encoding == nil { - return "" - } - return *r.Encoding -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetGitURL() string { - if r == nil || r.GitURL == nil { - return "" - } - return *r.GitURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetLicense returns the License field. -func (r *RepositoryLicense) GetLicense() *License { - if r == nil { - return nil - } - return r.License -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetPath() string { - if r == nil || r.Path == nil { - return "" - } - return *r.Path -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetSHA() string { - if r == nil || r.SHA == nil { - return "" - } - return *r.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetType() string { - if r == nil || r.Type == nil { - return "" - } - return *r.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryLicense) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetBase returns the Base field if it's non-nil, zero value otherwise. -func (r *RepositoryMergeRequest) GetBase() string { - if r == nil || r.Base == nil { - return "" - } - return *r.Base -} - -// GetCommitMessage returns the CommitMessage field if it's non-nil, zero value otherwise. -func (r *RepositoryMergeRequest) GetCommitMessage() string { - if r == nil || r.CommitMessage == nil { - return "" - } - return *r.CommitMessage -} - -// GetHead returns the Head field if it's non-nil, zero value otherwise. 
-func (r *RepositoryMergeRequest) GetHead() string { - if r == nil || r.Head == nil { - return "" - } - return *r.Head -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (r *RepositoryPermissionLevel) GetPermission() string { - if r == nil || r.Permission == nil { - return "" - } - return *r.Permission -} - -// GetUser returns the User field. -func (r *RepositoryPermissionLevel) GetUser() *User { - if r == nil { - return nil - } - return r.User -} - -// GetAssetsURL returns the AssetsURL field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetAssetsURL() string { - if r == nil || r.AssetsURL == nil { - return "" - } - return *r.AssetsURL -} - -// GetAuthor returns the Author field. -func (r *RepositoryRelease) GetAuthor() *User { - if r == nil { - return nil - } - return r.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetBody() string { - if r == nil || r.Body == nil { - return "" - } - return *r.Body -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetDraft returns the Draft field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetDraft() bool { - if r == nil || r.Draft == nil { - return false - } - return *r.Draft -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetPrerelease returns the Prerelease field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetPrerelease() bool { - if r == nil || r.Prerelease == nil { - return false - } - return *r.Prerelease -} - -// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetPublishedAt() Timestamp { - if r == nil || r.PublishedAt == nil { - return Timestamp{} - } - return *r.PublishedAt -} - -// GetTagName returns the TagName field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetTagName() string { - if r == nil || r.TagName == nil { - return "" - } - return *r.TagName -} - -// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetTarballURL() string { - if r == nil || r.TarballURL == nil { - return "" - } - return *r.TarballURL -} - -// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetTargetCommitish() string { - if r == nil || r.TargetCommitish == nil { - return "" - } - return *r.TargetCommitish -} - -// GetUploadURL returns the UploadURL field if it's non-nil, zero value otherwise. 
-func (r *RepositoryRelease) GetUploadURL() string { - if r == nil || r.UploadURL == nil { - return "" - } - return *r.UploadURL -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise. -func (r *RepositoryRelease) GetZipballURL() string { - if r == nil || r.ZipballURL == nil { - return "" - } - return *r.ZipballURL -} - -// GetCommit returns the Commit field. -func (r *RepositoryTag) GetCommit() *Commit { - if r == nil { - return nil - } - return r.Commit -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepositoryTag) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise. -func (r *RepositoryTag) GetTarballURL() string { - if r == nil || r.TarballURL == nil { - return "" - } - return *r.TarballURL -} - -// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise. -func (r *RepositoryTag) GetZipballURL() string { - if r == nil || r.ZipballURL == nil { - return "" - } - return *r.ZipballURL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (r *RepositoryVulnerabilityAlertEvent) GetAction() string { - if r == nil || r.Action == nil { - return "" - } - return *r.Action -} - -// GetForkRepos returns the ForkRepos field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetForkRepos() int { - if r == nil || r.ForkRepos == nil { - return 0 - } - return *r.ForkRepos -} - -// GetOrgRepos returns the OrgRepos field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetOrgRepos() int { - if r == nil || r.OrgRepos == nil { - return 0 - } - return *r.OrgRepos -} - -// GetRootRepos returns the RootRepos field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetRootRepos() int { - if r == nil || r.RootRepos == nil { - return 0 - } - return *r.RootRepos -} - -// GetTotalPushes returns the TotalPushes field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetTotalPushes() int { - if r == nil || r.TotalPushes == nil { - return 0 - } - return *r.TotalPushes -} - -// GetTotalRepos returns the TotalRepos field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetTotalRepos() int { - if r == nil || r.TotalRepos == nil { - return 0 - } - return *r.TotalRepos -} - -// GetTotalWikis returns the TotalWikis field if it's non-nil, zero value otherwise. -func (r *RepoStats) GetTotalWikis() int { - if r == nil || r.TotalWikis == nil { - return 0 - } - return *r.TotalWikis -} - -// GetContext returns the Context field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetContext() string { - if r == nil || r.Context == nil { - return "" - } - return *r.Context -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetCreatedAt() time.Time { - if r == nil || r.CreatedAt == nil { - return time.Time{} - } - return *r.CreatedAt -} - -// GetCreator returns the Creator field. -func (r *RepoStatus) GetCreator() *User { - if r == nil { - return nil - } - return r.Creator -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. 
-func (r *RepoStatus) GetDescription() string { - if r == nil || r.Description == nil { - return "" - } - return *r.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetState() string { - if r == nil || r.State == nil { - return "" - } - return *r.State -} - -// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetTargetURL() string { - if r == nil || r.TargetURL == nil { - return "" - } - return *r.TargetURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetUpdatedAt() time.Time { - if r == nil || r.UpdatedAt == nil { - return time.Time{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetStrict returns the Strict field if it's non-nil, zero value otherwise. -func (r *RequiredStatusChecksRequest) GetStrict() bool { - if r == nil || r.Strict == nil { - return false - } - return *r.Strict -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (s *ServiceHook) GetName() string { - if s == nil || s.Name == nil { - return "" - } - return *s.Name -} - -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (s *SignaturesProtectedBranch) GetEnabled() bool { - if s == nil || s.Enabled == nil { - return false - } - return *s.Enabled -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *SignaturesProtectedBranch) GetURL() string { - if s == nil || s.URL == nil { - return "" - } - return *s.URL -} - -// GetPayload returns the Payload field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetPayload() string { - if s == nil || s.Payload == nil { - return "" - } - return *s.Payload -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetReason() string { - if s == nil || s.Reason == nil { - return "" - } - return *s.Reason -} - -// GetSignature returns the Signature field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetSignature() string { - if s == nil || s.Signature == nil { - return "" - } - return *s.Signature -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetVerified() bool { - if s == nil || s.Verified == nil { - return false - } - return *s.Verified -} - -// GetActor returns the Actor field. -func (s *Source) GetActor() *User { - if s == nil { - return nil - } - return s.Actor -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (s *Source) GetID() int64 { - if s == nil || s.ID == nil { - return 0 - } - return *s.ID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *Source) GetURL() string { - if s == nil || s.URL == nil { - return "" - } - return *s.URL -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetEmail() string { - if s == nil || s.Email == nil { - return "" - } - return *s.Email -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. 
-func (s *SourceImportAuthor) GetID() int64 { - if s == nil || s.ID == nil { - return 0 - } - return *s.ID -} - -// GetImportURL returns the ImportURL field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetImportURL() string { - if s == nil || s.ImportURL == nil { - return "" - } - return *s.ImportURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetName() string { - if s == nil || s.Name == nil { - return "" - } - return *s.Name -} - -// GetRemoteID returns the RemoteID field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetRemoteID() string { - if s == nil || s.RemoteID == nil { - return "" - } - return *s.RemoteID -} - -// GetRemoteName returns the RemoteName field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetRemoteName() string { - if s == nil || s.RemoteName == nil { - return "" - } - return *s.RemoteName -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *SourceImportAuthor) GetURL() string { - if s == nil || s.URL == nil { - return "" - } - return *s.URL -} - -// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise. -func (s *Stargazer) GetStarredAt() Timestamp { - if s == nil || s.StarredAt == nil { - return Timestamp{} - } - return *s.StarredAt -} - -// GetUser returns the User field. -func (s *Stargazer) GetUser() *User { - if s == nil { - return nil - } - return s.User -} - -// GetRepository returns the Repository field. -func (s *StarredRepository) GetRepository() *Repository { - if s == nil { - return nil - } - return s.Repository -} - -// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise. -func (s *StarredRepository) GetStarredAt() Timestamp { - if s == nil || s.StarredAt == nil { - return Timestamp{} - } - return *s.StarredAt -} - -// GetCommit returns the Commit field. -func (s *StatusEvent) GetCommit() *RepositoryCommit { - if s == nil { - return nil - } - return s.Commit -} - -// GetContext returns the Context field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetContext() string { - if s == nil || s.Context == nil { - return "" - } - return *s.Context -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetCreatedAt() Timestamp { - if s == nil || s.CreatedAt == nil { - return Timestamp{} - } - return *s.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetDescription() string { - if s == nil || s.Description == nil { - return "" - } - return *s.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetID() int64 { - if s == nil || s.ID == nil { - return 0 - } - return *s.ID -} - -// GetInstallation returns the Installation field. -func (s *StatusEvent) GetInstallation() *Installation { - if s == nil { - return nil - } - return s.Installation -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetName() string { - if s == nil || s.Name == nil { - return "" - } - return *s.Name -} - -// GetRepo returns the Repo field. -func (s *StatusEvent) GetRepo() *Repository { - if s == nil { - return nil - } - return s.Repo -} - -// GetSender returns the Sender field. 
-func (s *StatusEvent) GetSender() *User { - if s == nil { - return nil - } - return s.Sender -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetSHA() string { - if s == nil || s.SHA == nil { - return "" - } - return *s.SHA -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetState() string { - if s == nil || s.State == nil { - return "" - } - return *s.State -} - -// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetTargetURL() string { - if s == nil || s.TargetURL == nil { - return "" - } - return *s.TargetURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetUpdatedAt() Timestamp { - if s == nil || s.UpdatedAt == nil { - return Timestamp{} - } - return *s.UpdatedAt -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (s *Subscription) GetCreatedAt() Timestamp { - if s == nil || s.CreatedAt == nil { - return Timestamp{} - } - return *s.CreatedAt -} - -// GetIgnored returns the Ignored field if it's non-nil, zero value otherwise. -func (s *Subscription) GetIgnored() bool { - if s == nil || s.Ignored == nil { - return false - } - return *s.Ignored -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (s *Subscription) GetReason() string { - if s == nil || s.Reason == nil { - return "" - } - return *s.Reason -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetRepositoryURL() string { - if s == nil || s.RepositoryURL == nil { - return "" - } - return *s.RepositoryURL -} - -// GetSubscribed returns the Subscribed field if it's non-nil, zero value otherwise. -func (s *Subscription) GetSubscribed() bool { - if s == nil || s.Subscribed == nil { - return false - } - return *s.Subscribed -} - -// GetThreadURL returns the ThreadURL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetThreadURL() string { - if s == nil || s.ThreadURL == nil { - return "" - } - return *s.ThreadURL -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetURL() string { - if s == nil || s.URL == nil { - return "" - } - return *s.URL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (t *Tag) GetMessage() string { - if t == nil || t.Message == nil { - return "" - } - return *t.Message -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (t *Tag) GetNodeID() string { - if t == nil || t.NodeID == nil { - return "" - } - return *t.NodeID -} - -// GetObject returns the Object field. -func (t *Tag) GetObject() *GitObject { - if t == nil { - return nil - } - return t.Object -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *Tag) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetTag returns the Tag field if it's non-nil, zero value otherwise. -func (t *Tag) GetTag() string { - if t == nil || t.Tag == nil { - return "" - } - return *t.Tag -} - -// GetTagger returns the Tagger field. -func (t *Tag) GetTagger() *CommitAuthor { - if t == nil { - return nil - } - return t.Tagger -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (t *Tag) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetVerification returns the Verification field. -func (t *Tag) GetVerification() *SignatureVerification { - if t == nil { - return nil - } - return t.Verification -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *Team) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *Team) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (t *Team) GetLDAPDN() string { - if t == nil || t.LDAPDN == nil { - return "" - } - return *t.LDAPDN -} - -// GetMembersCount returns the MembersCount field if it's non-nil, zero value otherwise. -func (t *Team) GetMembersCount() int { - if t == nil || t.MembersCount == nil { - return 0 - } - return *t.MembersCount -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (t *Team) GetMembersURL() string { - if t == nil || t.MembersURL == nil { - return "" - } - return *t.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *Team) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetOrganization returns the Organization field. -func (t *Team) GetOrganization() *Organization { - if t == nil { - return nil - } - return t.Organization -} - -// GetParent returns the Parent field. -func (t *Team) GetParent() *Team { - if t == nil { - return nil - } - return t.Parent -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (t *Team) GetPermission() string { - if t == nil || t.Permission == nil { - return "" - } - return *t.Permission -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (t *Team) GetPrivacy() string { - if t == nil || t.Privacy == nil { - return "" - } - return *t.Privacy -} - -// GetReposCount returns the ReposCount field if it's non-nil, zero value otherwise. -func (t *Team) GetReposCount() int { - if t == nil || t.ReposCount == nil { - return 0 - } - return *t.ReposCount -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (t *Team) GetRepositoriesURL() string { - if t == nil || t.RepositoriesURL == nil { - return "" - } - return *t.RepositoriesURL -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (t *Team) GetSlug() string { - if t == nil || t.Slug == nil { - return "" - } - return *t.Slug -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *Team) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetInstallation returns the Installation field. -func (t *TeamAddEvent) GetInstallation() *Installation { - if t == nil { - return nil - } - return t.Installation -} - -// GetOrg returns the Org field. -func (t *TeamAddEvent) GetOrg() *Organization { - if t == nil { - return nil - } - return t.Org -} - -// GetRepo returns the Repo field. -func (t *TeamAddEvent) GetRepo() *Repository { - if t == nil { - return nil - } - return t.Repo -} - -// GetSender returns the Sender field. 
-func (t *TeamAddEvent) GetSender() *User { - if t == nil { - return nil - } - return t.Sender -} - -// GetTeam returns the Team field. -func (t *TeamAddEvent) GetTeam() *Team { - if t == nil { - return nil - } - return t.Team -} - -// GetAuthor returns the Author field. -func (t *TeamDiscussion) GetAuthor() *User { - if t == nil { - return nil - } - return t.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetBody() string { - if t == nil || t.Body == nil { - return "" - } - return *t.Body -} - -// GetBodyHTML returns the BodyHTML field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetBodyHTML() string { - if t == nil || t.BodyHTML == nil { - return "" - } - return *t.BodyHTML -} - -// GetBodyVersion returns the BodyVersion field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetBodyVersion() string { - if t == nil || t.BodyVersion == nil { - return "" - } - return *t.BodyVersion -} - -// GetCommentsCount returns the CommentsCount field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCommentsCount() int { - if t == nil || t.CommentsCount == nil { - return 0 - } - return *t.CommentsCount -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCommentsURL() string { - if t == nil || t.CommentsURL == nil { - return "" - } - return *t.CommentsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCreatedAt() Timestamp { - if t == nil || t.CreatedAt == nil { - return Timestamp{} - } - return *t.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetHTMLURL() string { - if t == nil || t.HTMLURL == nil { - return "" - } - return *t.HTMLURL -} - -// GetLastEditedAt returns the LastEditedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetLastEditedAt() Timestamp { - if t == nil || t.LastEditedAt == nil { - return Timestamp{} - } - return *t.LastEditedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetNodeID() string { - if t == nil || t.NodeID == nil { - return "" - } - return *t.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetNumber() int { - if t == nil || t.Number == nil { - return 0 - } - return *t.Number -} - -// GetPinned returns the Pinned field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetPinned() bool { - if t == nil || t.Pinned == nil { - return false - } - return *t.Pinned -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetPrivate() bool { - if t == nil || t.Private == nil { - return false - } - return *t.Private -} - -// GetReactions returns the Reactions field. -func (t *TeamDiscussion) GetReactions() *Reactions { - if t == nil { - return nil - } - return t.Reactions -} - -// GetTeamURL returns the TeamURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetTeamURL() string { - if t == nil || t.TeamURL == nil { - return "" - } - return *t.TeamURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. 
-func (t *TeamDiscussion) GetTitle() string { - if t == nil || t.Title == nil { - return "" - } - return *t.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetUpdatedAt() Timestamp { - if t == nil || t.UpdatedAt == nil { - return Timestamp{} - } - return *t.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (t *TeamEvent) GetAction() string { - if t == nil || t.Action == nil { - return "" - } - return *t.Action -} - -// GetChanges returns the Changes field. -func (t *TeamEvent) GetChanges() *TeamChange { - if t == nil { - return nil - } - return t.Changes -} - -// GetInstallation returns the Installation field. -func (t *TeamEvent) GetInstallation() *Installation { - if t == nil { - return nil - } - return t.Installation -} - -// GetOrg returns the Org field. -func (t *TeamEvent) GetOrg() *Organization { - if t == nil { - return nil - } - return t.Org -} - -// GetRepo returns the Repo field. -func (t *TeamEvent) GetRepo() *Repository { - if t == nil { - return nil - } - return t.Repo -} - -// GetSender returns the Sender field. -func (t *TeamEvent) GetSender() *User { - if t == nil { - return nil - } - return t.Sender -} - -// GetTeam returns the Team field. -func (t *TeamEvent) GetTeam() *Team { - if t == nil { - return nil - } - return t.Team -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetLDAPDN() string { - if t == nil || t.LDAPDN == nil { - return "" - } - return *t.LDAPDN -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetMembersURL() string { - if t == nil || t.MembersURL == nil { - return "" - } - return *t.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetPermission() string { - if t == nil || t.Permission == nil { - return "" - } - return *t.Permission -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetPrivacy() string { - if t == nil || t.Privacy == nil { - return "" - } - return *t.Privacy -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetRepositoriesURL() string { - if t == nil || t.RepositoriesURL == nil { - return "" - } - return *t.RepositoriesURL -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetSlug() string { - if t == nil || t.Slug == nil { - return "" - } - return *t.Slug -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (t *TeamLDAPMapping) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetFragment returns the Fragment field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetFragment() string { - if t == nil || t.Fragment == nil { - return "" - } - return *t.Fragment -} - -// GetObjectType returns the ObjectType field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetObjectType() string { - if t == nil || t.ObjectType == nil { - return "" - } - return *t.ObjectType -} - -// GetObjectURL returns the ObjectURL field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetObjectURL() string { - if t == nil || t.ObjectURL == nil { - return "" - } - return *t.ObjectURL -} - -// GetProperty returns the Property field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetProperty() string { - if t == nil || t.Property == nil { - return "" - } - return *t.Property -} - -// GetActor returns the Actor field. -func (t *Timeline) GetActor() *User { - if t == nil { - return nil - } - return t.Actor -} - -// GetAssignee returns the Assignee field. -func (t *Timeline) GetAssignee() *User { - if t == nil { - return nil - } - return t.Assignee -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCommitID() string { - if t == nil || t.CommitID == nil { - return "" - } - return *t.CommitID -} - -// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCommitURL() string { - if t == nil || t.CommitURL == nil { - return "" - } - return *t.CommitURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCreatedAt() time.Time { - if t == nil || t.CreatedAt == nil { - return time.Time{} - } - return *t.CreatedAt -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (t *Timeline) GetEvent() string { - if t == nil || t.Event == nil { - return "" - } - return *t.Event -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *Timeline) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLabel returns the Label field. -func (t *Timeline) GetLabel() *Label { - if t == nil { - return nil - } - return t.Label -} - -// GetMilestone returns the Milestone field. -func (t *Timeline) GetMilestone() *Milestone { - if t == nil { - return nil - } - return t.Milestone -} - -// GetRename returns the Rename field. -func (t *Timeline) GetRename() *Rename { - if t == nil { - return nil - } - return t.Rename -} - -// GetSource returns the Source field. -func (t *Timeline) GetSource() *Source { - if t == nil { - return nil - } - return t.Source -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *Timeline) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficClones) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficClones) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. 
-func (t *TrafficData) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (t *TrafficData) GetTimestamp() Timestamp { - if t == nil || t.Timestamp == nil { - return Timestamp{} - } - return *t.Timestamp -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficData) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetPath() string { - if t == nil || t.Path == nil { - return "" - } - return *t.Path -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetTitle() string { - if t == nil || t.Title == nil { - return "" - } - return *t.Title -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetReferrer returns the Referrer field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetReferrer() string { - if t == nil || t.Referrer == nil { - return "" - } - return *t.Referrer -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficViews) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficViews) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *Tree) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetTruncated returns the Truncated field if it's non-nil, zero value otherwise. -func (t *Tree) GetTruncated() bool { - if t == nil || t.Truncated == nil { - return false - } - return *t.Truncated -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetContent() string { - if t == nil || t.Content == nil { - return "" - } - return *t.Content -} - -// GetMode returns the Mode field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetMode() string { - if t == nil || t.Mode == nil { - return "" - } - return *t.Mode -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetPath() string { - if t == nil || t.Path == nil { - return "" - } - return *t.Path -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. 
-func (t *TreeEntry) GetSize() int { - if t == nil || t.Size == nil { - return 0 - } - return *t.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetType() string { - if t == nil || t.Type == nil { - return "" - } - return *t.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetCompletedAt() Timestamp { - if u == nil || u.CompletedAt == nil { - return Timestamp{} - } - return *u.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetConclusion() string { - if u == nil || u.Conclusion == nil { - return "" - } - return *u.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetDetailsURL() string { - if u == nil || u.DetailsURL == nil { - return "" - } - return *u.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetExternalID() string { - if u == nil || u.ExternalID == nil { - return "" - } - return *u.ExternalID -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetHeadBranch() string { - if u == nil || u.HeadBranch == nil { - return "" - } - return *u.HeadBranch -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetHeadSHA() string { - if u == nil || u.HeadSHA == nil { - return "" - } - return *u.HeadSHA -} - -// GetOutput returns the Output field. -func (u *UpdateCheckRunOptions) GetOutput() *CheckRunOutput { - if u == nil { - return nil - } - return u.Output -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetStatus() string { - if u == nil || u.Status == nil { - return "" - } - return *u.Status -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (u *User) GetAvatarURL() string { - if u == nil || u.AvatarURL == nil { - return "" - } - return *u.AvatarURL -} - -// GetBio returns the Bio field if it's non-nil, zero value otherwise. -func (u *User) GetBio() string { - if u == nil || u.Bio == nil { - return "" - } - return *u.Bio -} - -// GetBlog returns the Blog field if it's non-nil, zero value otherwise. -func (u *User) GetBlog() string { - if u == nil || u.Blog == nil { - return "" - } - return *u.Blog -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (u *User) GetCollaborators() int { - if u == nil || u.Collaborators == nil { - return 0 - } - return *u.Collaborators -} - -// GetCompany returns the Company field if it's non-nil, zero value otherwise. -func (u *User) GetCompany() string { - if u == nil || u.Company == nil { - return "" - } - return *u.Company -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (u *User) GetCreatedAt() Timestamp { - if u == nil || u.CreatedAt == nil { - return Timestamp{} - } - return *u.CreatedAt -} - -// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise. 
-func (u *User) GetDiskUsage() int { - if u == nil || u.DiskUsage == nil { - return 0 - } - return *u.DiskUsage -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (u *User) GetEmail() string { - if u == nil || u.Email == nil { - return "" - } - return *u.Email -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (u *User) GetEventsURL() string { - if u == nil || u.EventsURL == nil { - return "" - } - return *u.EventsURL -} - -// GetFollowers returns the Followers field if it's non-nil, zero value otherwise. -func (u *User) GetFollowers() int { - if u == nil || u.Followers == nil { - return 0 - } - return *u.Followers -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (u *User) GetFollowersURL() string { - if u == nil || u.FollowersURL == nil { - return "" - } - return *u.FollowersURL -} - -// GetFollowing returns the Following field if it's non-nil, zero value otherwise. -func (u *User) GetFollowing() int { - if u == nil || u.Following == nil { - return 0 - } - return *u.Following -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (u *User) GetFollowingURL() string { - if u == nil || u.FollowingURL == nil { - return "" - } - return *u.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (u *User) GetGistsURL() string { - if u == nil || u.GistsURL == nil { - return "" - } - return *u.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. -func (u *User) GetGravatarID() string { - if u == nil || u.GravatarID == nil { - return "" - } - return *u.GravatarID -} - -// GetHireable returns the Hireable field if it's non-nil, zero value otherwise. -func (u *User) GetHireable() bool { - if u == nil || u.Hireable == nil { - return false - } - return *u.Hireable -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (u *User) GetHTMLURL() string { - if u == nil || u.HTMLURL == nil { - return "" - } - return *u.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *User) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (u *User) GetLocation() string { - if u == nil || u.Location == nil { - return "" - } - return *u.Location -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (u *User) GetLogin() string { - if u == nil || u.Login == nil { - return "" - } - return *u.Login -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (u *User) GetName() string { - if u == nil || u.Name == nil { - return "" - } - return *u.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (u *User) GetNodeID() string { - if u == nil || u.NodeID == nil { - return "" - } - return *u.NodeID -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (u *User) GetOrganizationsURL() string { - if u == nil || u.OrganizationsURL == nil { - return "" - } - return *u.OrganizationsURL -} - -// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. 
-func (u *User) GetOwnedPrivateRepos() int { - if u == nil || u.OwnedPrivateRepos == nil { - return 0 - } - return *u.OwnedPrivateRepos -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. -func (u *User) GetPermissions() map[string]bool { - if u == nil || u.Permissions == nil { - return map[string]bool{} - } - return *u.Permissions -} - -// GetPlan returns the Plan field. -func (u *User) GetPlan() *Plan { - if u == nil { - return nil - } - return u.Plan -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (u *User) GetPrivateGists() int { - if u == nil || u.PrivateGists == nil { - return 0 - } - return *u.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. -func (u *User) GetPublicGists() int { - if u == nil || u.PublicGists == nil { - return 0 - } - return *u.PublicGists -} - -// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise. -func (u *User) GetPublicRepos() int { - if u == nil || u.PublicRepos == nil { - return 0 - } - return *u.PublicRepos -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. -func (u *User) GetReceivedEventsURL() string { - if u == nil || u.ReceivedEventsURL == nil { - return "" - } - return *u.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (u *User) GetReposURL() string { - if u == nil || u.ReposURL == nil { - return "" - } - return *u.ReposURL -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (u *User) GetSiteAdmin() bool { - if u == nil || u.SiteAdmin == nil { - return false - } - return *u.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (u *User) GetStarredURL() string { - if u == nil || u.StarredURL == nil { - return "" - } - return *u.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (u *User) GetSubscriptionsURL() string { - if u == nil || u.SubscriptionsURL == nil { - return "" - } - return *u.SubscriptionsURL -} - -// GetSuspendedAt returns the SuspendedAt field if it's non-nil, zero value otherwise. -func (u *User) GetSuspendedAt() Timestamp { - if u == nil || u.SuspendedAt == nil { - return Timestamp{} - } - return *u.SuspendedAt -} - -// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (u *User) GetTotalPrivateRepos() int { - if u == nil || u.TotalPrivateRepos == nil { - return 0 - } - return *u.TotalPrivateRepos -} - -// GetTwoFactorAuthentication returns the TwoFactorAuthentication field if it's non-nil, zero value otherwise. -func (u *User) GetTwoFactorAuthentication() bool { - if u == nil || u.TwoFactorAuthentication == nil { - return false - } - return *u.TwoFactorAuthentication -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (u *User) GetType() string { - if u == nil || u.Type == nil { - return "" - } - return *u.Type -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (u *User) GetUpdatedAt() Timestamp { - if u == nil || u.UpdatedAt == nil { - return Timestamp{} - } - return *u.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
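Nearly every struct field in this package is a pointer so that an absent value can be distinguished from the zero value; the generated getters above collapse the resulting nil checks. A minimal sketch of reading optional User fields (the login value is hypothetical), assuming the package's usual import path:

package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// describeUser reads optional fields without manual nil checks: a nil *User
// or a nil field simply yields the zero value.
func describeUser(u *github.User) string {
	return fmt.Sprintf("%s <%s> (%d followers)", u.GetLogin(), u.GetEmail(), u.GetFollowers())
}

func example() {
	var u *github.User           // nil on purpose
	fmt.Println(describeUser(u)) // " <> (0 followers)"
	u = &github.User{Login: github.String("octocat")}
	fmt.Println(describeUser(u)) // "octocat <> (0 followers)"
}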
-func (u *User) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (u *UserContext) GetMessage() string { - if u == nil || u.Message == nil { - return "" - } - return *u.Message -} - -// GetOcticon returns the Octicon field if it's non-nil, zero value otherwise. -func (u *UserContext) GetOcticon() string { - if u == nil || u.Octicon == nil { - return "" - } - return *u.Octicon -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetEmail() string { - if u == nil || u.Email == nil { - return "" - } - return *u.Email -} - -// GetPrimary returns the Primary field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetPrimary() bool { - if u == nil || u.Primary == nil { - return false - } - return *u.Primary -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetVerified() bool { - if u == nil || u.Verified == nil { - return false - } - return *u.Verified -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetAvatarURL() string { - if u == nil || u.AvatarURL == nil { - return "" - } - return *u.AvatarURL -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetEventsURL() string { - if u == nil || u.EventsURL == nil { - return "" - } - return *u.EventsURL -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetFollowersURL() string { - if u == nil || u.FollowersURL == nil { - return "" - } - return *u.FollowersURL -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetFollowingURL() string { - if u == nil || u.FollowingURL == nil { - return "" - } - return *u.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetGistsURL() string { - if u == nil || u.GistsURL == nil { - return "" - } - return *u.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetGravatarID() string { - if u == nil || u.GravatarID == nil { - return "" - } - return *u.GravatarID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetLDAPDN() string { - if u == nil || u.LDAPDN == nil { - return "" - } - return *u.LDAPDN -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetLogin() string { - if u == nil || u.Login == nil { - return "" - } - return *u.Login -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetOrganizationsURL() string { - if u == nil || u.OrganizationsURL == nil { - return "" - } - return *u.OrganizationsURL -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. 
-func (u *UserLDAPMapping) GetReceivedEventsURL() string { - if u == nil || u.ReceivedEventsURL == nil { - return "" - } - return *u.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetReposURL() string { - if u == nil || u.ReposURL == nil { - return "" - } - return *u.ReposURL -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetSiteAdmin() bool { - if u == nil || u.SiteAdmin == nil { - return false - } - return *u.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetStarredURL() string { - if u == nil || u.StarredURL == nil { - return "" - } - return *u.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetSubscriptionsURL() string { - if u == nil || u.SubscriptionsURL == nil { - return "" - } - return *u.SubscriptionsURL -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetType() string { - if u == nil || u.Type == nil { - return "" - } - return *u.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetCreatedAt() string { - if u == nil || u.CreatedAt == nil { - return "" - } - return *u.CreatedAt -} - -// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetExcludeAttachments() bool { - if u == nil || u.ExcludeAttachments == nil { - return false - } - return *u.ExcludeAttachments -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetGUID() string { - if u == nil || u.GUID == nil { - return "" - } - return *u.GUID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetLockRepositories() bool { - if u == nil || u.LockRepositories == nil { - return false - } - return *u.LockRepositories -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetState() string { - if u == nil || u.State == nil { - return "" - } - return *u.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetUpdatedAt() string { - if u == nil || u.UpdatedAt == nil { - return "" - } - return *u.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (u *UsersSearchResult) GetIncompleteResults() bool { - if u == nil || u.IncompleteResults == nil { - return false - } - return *u.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. 
-func (u *UsersSearchResult) GetTotal() int { - if u == nil || u.Total == nil { - return 0 - } - return *u.Total -} - -// GetAdminUsers returns the AdminUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetAdminUsers() int { - if u == nil || u.AdminUsers == nil { - return 0 - } - return *u.AdminUsers -} - -// GetSuspendedUsers returns the SuspendedUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetSuspendedUsers() int { - if u == nil || u.SuspendedUsers == nil { - return 0 - } - return *u.SuspendedUsers -} - -// GetTotalUsers returns the TotalUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetTotalUsers() int { - if u == nil || u.TotalUsers == nil { - return 0 - } - return *u.TotalUsers -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WatchEvent) GetAction() string { - if w == nil || w.Action == nil { - return "" - } - return *w.Action -} - -// GetInstallation returns the Installation field. -func (w *WatchEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetRepo returns the Repo field. -func (w *WatchEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WatchEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetEmail() string { - if w == nil || w.Email == nil { - return "" - } - return *w.Email -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetName() string { - if w == nil || w.Name == nil { - return "" - } - return *w.Name -} - -// GetUsername returns the Username field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetUsername() string { - if w == nil || w.Username == nil { - return "" - } - return *w.Username -} - -// GetAuthor returns the Author field. -func (w *WebHookCommit) GetAuthor() *WebHookAuthor { - if w == nil { - return nil - } - return w.Author -} - -// GetCommitter returns the Committer field. -func (w *WebHookCommit) GetCommitter() *WebHookAuthor { - if w == nil { - return nil - } - return w.Committer -} - -// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetDistinct() bool { - if w == nil || w.Distinct == nil { - return false - } - return *w.Distinct -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetID() string { - if w == nil || w.ID == nil { - return "" - } - return *w.ID -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetMessage() string { - if w == nil || w.Message == nil { - return "" - } - return *w.Message -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetTimestamp() time.Time { - if w == nil || w.Timestamp == nil { - return time.Time{} - } - return *w.Timestamp -} - -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetAfter() string { - if w == nil || w.After == nil { - return "" - } - return *w.After -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. 
-func (w *WebHookPayload) GetBefore() string { - if w == nil || w.Before == nil { - return "" - } - return *w.Before -} - -// GetCompare returns the Compare field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetCompare() string { - if w == nil || w.Compare == nil { - return "" - } - return *w.Compare -} - -// GetCreated returns the Created field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetCreated() bool { - if w == nil || w.Created == nil { - return false - } - return *w.Created -} - -// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetDeleted() bool { - if w == nil || w.Deleted == nil { - return false - } - return *w.Deleted -} - -// GetForced returns the Forced field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetForced() bool { - if w == nil || w.Forced == nil { - return false - } - return *w.Forced -} - -// GetHeadCommit returns the HeadCommit field. -func (w *WebHookPayload) GetHeadCommit() *WebHookCommit { - if w == nil { - return nil - } - return w.HeadCommit -} - -// GetPusher returns the Pusher field. -func (w *WebHookPayload) GetPusher() *User { - if w == nil { - return nil - } - return w.Pusher -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetRef() string { - if w == nil || w.Ref == nil { - return "" - } - return *w.Ref -} - -// GetRepo returns the Repo field. -func (w *WebHookPayload) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WebHookPayload) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (w *WeeklyCommitActivity) GetTotal() int { - if w == nil || w.Total == nil { - return 0 - } - return *w.Total -} - -// GetWeek returns the Week field if it's non-nil, zero value otherwise. -func (w *WeeklyCommitActivity) GetWeek() Timestamp { - if w == nil || w.Week == nil { - return Timestamp{} - } - return *w.Week -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetAdditions() int { - if w == nil || w.Additions == nil { - return 0 - } - return *w.Additions -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetCommits() int { - if w == nil || w.Commits == nil { - return 0 - } - return *w.Commits -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetDeletions() int { - if w == nil || w.Deletions == nil { - return 0 - } - return *w.Deletions -} - -// GetWeek returns the Week field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetWeek() Timestamp { - if w == nil || w.Week == nil { - return Timestamp{} - } - return *w.Week -} diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go deleted file mode 100644 index 19ffecc7d260..000000000000 --- a/vendor/github.com/google/go-github/github/github.go +++ /dev/null @@ -1,1012 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
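Because the pointer-returning getters (GetPusher, GetHeadCommit, GetRepo, and so on) also tolerate a nil receiver, calls can be chained through nested optional structs without panicking, which is the main payoff of generating this accessor file. A small sketch using the webhook payload types above:

package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// headCommitSummary is safe even when payload, its Pusher, or its HeadCommit
// is nil: each getter in the chain returns a zero value instead of
// dereferencing a nil pointer.
func headCommitSummary(payload *github.WebHookPayload) string {
	return fmt.Sprintf("%s pushed %q",
		payload.GetPusher().GetLogin(),
		payload.GetHeadCommit().GetMessage())
}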
- -//go:generate go run gen-accessors.go - -package github - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/go-querystring/query" -) - -const ( - defaultBaseURL = "https://api.github.com/" - uploadBaseURL = "https://uploads.github.com/" - userAgent = "go-github" - - headerRateLimit = "X-RateLimit-Limit" - headerRateRemaining = "X-RateLimit-Remaining" - headerRateReset = "X-RateLimit-Reset" - headerOTP = "X-GitHub-OTP" - - mediaTypeV3 = "application/vnd.github.v3+json" - defaultMediaType = "application/octet-stream" - mediaTypeV3SHA = "application/vnd.github.v3.sha" - mediaTypeV3Diff = "application/vnd.github.v3.diff" - mediaTypeV3Patch = "application/vnd.github.v3.patch" - mediaTypeOrgPermissionRepo = "application/vnd.github.v3.repository+json" - - // Media Type values to access preview APIs - - // https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/ - mediaTypeStarringPreview = "application/vnd.github.v3.star+json" - - // https://help.github.com/enterprise/2.4/admin/guides/migrations/exporting-the-github-com-organization-s-repositories/ - mediaTypeMigrationsPreview = "application/vnd.github.wyandotte-preview+json" - - // https://developer.github.com/changes/2016-04-06-deployment-and-deployment-status-enhancements/ - mediaTypeDeploymentStatusPreview = "application/vnd.github.ant-man-preview+json" - - // https://developer.github.com/changes/2018-10-16-deployments-environments-states-and-auto-inactive-updates/ - mediaTypeExpandDeploymentStatusPreview = "application/vnd.github.flash-preview+json" - - // https://developer.github.com/changes/2016-02-19-source-import-preview-api/ - mediaTypeImportPreview = "application/vnd.github.barred-rock-preview" - - // https://developer.github.com/changes/2016-05-12-reactions-api-preview/ - mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview" - - // https://developer.github.com/changes/2016-05-23-timeline-preview-api/ - mediaTypeTimelinePreview = "application/vnd.github.mockingbird-preview+json" - - // https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/ - mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json" - - // https://developer.github.com/changes/2016-09-14-projects-api/ - mediaTypeProjectsPreview = "application/vnd.github.inertia-preview+json" - - // https://developer.github.com/changes/2016-09-14-Integrations-Early-Access/ - mediaTypeIntegrationPreview = "application/vnd.github.machine-man-preview+json" - - // https://developer.github.com/changes/2017-01-05-commit-search-api/ - mediaTypeCommitSearchPreview = "application/vnd.github.cloak-preview+json" - - // https://developer.github.com/changes/2017-02-28-user-blocking-apis-and-webhook/ - mediaTypeBlockUsersPreview = "application/vnd.github.giant-sentry-fist-preview+json" - - // https://developer.github.com/changes/2017-02-09-community-health/ - mediaTypeRepositoryCommunityHealthMetricsPreview = "application/vnd.github.black-panther-preview+json" - - // https://developer.github.com/changes/2017-05-23-coc-api/ - mediaTypeCodesOfConductPreview = "application/vnd.github.scarlet-witch-preview+json" - - // https://developer.github.com/changes/2017-07-17-update-topics-on-repositories/ - mediaTypeTopicsPreview = "application/vnd.github.mercy-preview+json" - - // https://developer.github.com/changes/2017-08-30-preview-nested-teams/ - mediaTypeNestedTeamsPreview = 
"application/vnd.github.hellcat-preview+json" - - // https://developer.github.com/changes/2017-11-09-repository-transfer-api-preview/ - mediaTypeRepositoryTransferPreview = "application/vnd.github.nightshade-preview+json" - - // https://developer.github.com/changes/2018-01-25-organization-invitation-api-preview/ - mediaTypeOrganizationInvitationPreview = "application/vnd.github.dazzler-preview+json" - - // https://developer.github.com/changes/2018-03-16-protected-branches-required-approving-reviews/ - mediaTypeRequiredApprovingReviewsPreview = "application/vnd.github.luke-cage-preview+json" - - // https://developer.github.com/changes/2018-02-22-label-description-search-preview/ - mediaTypeLabelDescriptionSearchPreview = "application/vnd.github.symmetra-preview+json" - - // https://developer.github.com/changes/2018-02-07-team-discussions-api/ - mediaTypeTeamDiscussionsPreview = "application/vnd.github.echo-preview+json" - - // https://developer.github.com/changes/2018-03-21-hovercard-api-preview/ - mediaTypeHovercardPreview = "application/vnd.github.hagar-preview+json" - - // https://developer.github.com/changes/2018-01-10-lock-reason-api-preview/ - mediaTypeLockReasonPreview = "application/vnd.github.sailor-v-preview+json" - - // https://developer.github.com/changes/2018-05-07-new-checks-api-public-beta/ - mediaTypeCheckRunsPreview = "application/vnd.github.antiope-preview+json" - - // https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/ - mediaTypePreReceiveHooksPreview = "application/vnd.github.eye-scream-preview" - - // https://developer.github.com/changes/2018-02-22-protected-branches-required-signatures/ - mediaTypeSignaturePreview = "application/vnd.github.zzzax-preview+json" - - // https://developer.github.com/changes/2018-09-05-project-card-events/ - mediaTypeProjectCardDetailsPreview = "application/vnd.github.starfox-preview+json" -) - -// A Client manages communication with the GitHub API. -type Client struct { - clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func. - client *http.Client // HTTP client used to communicate with the API. - - // Base URL for API requests. Defaults to the public GitHub API, but can be - // set to a domain endpoint to use with GitHub Enterprise. BaseURL should - // always be specified with a trailing slash. - BaseURL *url.URL - - // Base URL for uploading files. - UploadURL *url.URL - - // User agent used when communicating with the GitHub API. - UserAgent string - - rateMu sync.Mutex - rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls. - - common service // Reuse a single struct instead of allocating one for each service on the heap. - - // Services used for talking to different parts of the GitHub API. - Activity *ActivityService - Admin *AdminService - Apps *AppsService - Authorizations *AuthorizationsService - Checks *ChecksService - Gists *GistsService - Git *GitService - Gitignores *GitignoresService - Issues *IssuesService - Licenses *LicensesService - Marketplace *MarketplaceService - Migrations *MigrationService - Organizations *OrganizationsService - Projects *ProjectsService - PullRequests *PullRequestsService - Reactions *ReactionsService - Repositories *RepositoriesService - Search *SearchService - Teams *TeamsService - Users *UsersService -} - -type service struct { - client *Client -} - -// ListOptions specifies the optional parameters to various List methods that -// support pagination. 
-type ListOptions struct { - // For paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` -} - -// UploadOptions specifies the parameters to methods that support uploads. -type UploadOptions struct { - Name string `url:"name,omitempty"` -} - -// RawType represents type of raw format of a request instead of JSON. -type RawType uint8 - -const ( - // Diff format. - Diff RawType = 1 + iota - // Patch format. - Patch -) - -// RawOptions specifies parameters when user wants to get raw format of -// a response instead of JSON. -type RawOptions struct { - Type RawType -} - -// addOptions adds the parameters in opt as URL query parameters to s. opt -// must be a struct whose fields may contain "url" tags. -func addOptions(s string, opt interface{}) (string, error) { - v := reflect.ValueOf(opt) - if v.Kind() == reflect.Ptr && v.IsNil() { - return s, nil - } - - u, err := url.Parse(s) - if err != nil { - return s, err - } - - qs, err := query.Values(opt) - if err != nil { - return s, err - } - - u.RawQuery = qs.Encode() - return u.String(), nil -} - -// NewClient returns a new GitHub API client. If a nil httpClient is -// provided, http.DefaultClient will be used. To use API methods which require -// authentication, provide an http.Client that will perform the authentication -// for you (such as that provided by the golang.org/x/oauth2 library). -func NewClient(httpClient *http.Client) *Client { - if httpClient == nil { - httpClient = http.DefaultClient - } - baseURL, _ := url.Parse(defaultBaseURL) - uploadURL, _ := url.Parse(uploadBaseURL) - - c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL} - c.common.client = c - c.Activity = (*ActivityService)(&c.common) - c.Admin = (*AdminService)(&c.common) - c.Apps = (*AppsService)(&c.common) - c.Authorizations = (*AuthorizationsService)(&c.common) - c.Checks = (*ChecksService)(&c.common) - c.Gists = (*GistsService)(&c.common) - c.Git = (*GitService)(&c.common) - c.Gitignores = (*GitignoresService)(&c.common) - c.Issues = (*IssuesService)(&c.common) - c.Licenses = (*LicensesService)(&c.common) - c.Marketplace = &MarketplaceService{client: c} - c.Migrations = (*MigrationService)(&c.common) - c.Organizations = (*OrganizationsService)(&c.common) - c.Projects = (*ProjectsService)(&c.common) - c.PullRequests = (*PullRequestsService)(&c.common) - c.Reactions = (*ReactionsService)(&c.common) - c.Repositories = (*RepositoriesService)(&c.common) - c.Search = (*SearchService)(&c.common) - c.Teams = (*TeamsService)(&c.common) - c.Users = (*UsersService)(&c.common) - return c -} - -// NewEnterpriseClient returns a new GitHub API client with provided -// base URL and upload URL (often the same URL). -// If either URL does not have a trailing slash, one is added automatically. -// If a nil httpClient is provided, http.DefaultClient will be used. -// -// Note that NewEnterpriseClient is a convenience helper only; -// its behavior is equivalent to using NewClient, followed by setting -// the BaseURL and UploadURL fields. 
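NewClient above deliberately leaves authentication to the injected *http.Client; the usual pairing is an oauth2 token source. A sketch assuming the external golang.org/x/oauth2 package and a placeholder token:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// The oauth2-wrapped client adds the Authorization header to every
	// request; go-github itself never handles the credential.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "YOUR_TOKEN"})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	fmt.Println(client.BaseURL) // https://api.github.com/
}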
-func NewEnterpriseClient(baseURL, uploadURL string, httpClient *http.Client) (*Client, error) { - baseEndpoint, err := url.Parse(baseURL) - if err != nil { - return nil, err - } - if !strings.HasSuffix(baseEndpoint.Path, "/") { - baseEndpoint.Path += "/" - } - - uploadEndpoint, err := url.Parse(uploadURL) - if err != nil { - return nil, err - } - if !strings.HasSuffix(uploadEndpoint.Path, "/") { - uploadEndpoint.Path += "/" - } - - c := NewClient(httpClient) - c.BaseURL = baseEndpoint - c.UploadURL = uploadEndpoint - return c, nil -} - -// NewRequest creates an API request. A relative URL can be provided in urlStr, -// in which case it is resolved relative to the BaseURL of the Client. -// Relative URLs should always be specified without a preceding slash. If -// specified, the value pointed to by body is JSON encoded and included as the -// request body. -func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { - if !strings.HasSuffix(c.BaseURL.Path, "/") { - return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) - } - u, err := c.BaseURL.Parse(urlStr) - if err != nil { - return nil, err - } - - var buf io.ReadWriter - if body != nil { - buf = new(bytes.Buffer) - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(body) - if err != nil { - return nil, err - } - } - - req, err := http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - req.Header.Set("Accept", mediaTypeV3) - if c.UserAgent != "" { - req.Header.Set("User-Agent", c.UserAgent) - } - return req, nil -} - -// NewUploadRequest creates an upload request. A relative URL can be provided in -// urlStr, in which case it is resolved relative to the UploadURL of the Client. -// Relative URLs should always be specified without a preceding slash. -func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) { - if !strings.HasSuffix(c.UploadURL.Path, "/") { - return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL) - } - u, err := c.UploadURL.Parse(urlStr) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u.String(), reader) - if err != nil { - return nil, err - } - req.ContentLength = size - - if mediaType == "" { - mediaType = defaultMediaType - } - req.Header.Set("Content-Type", mediaType) - req.Header.Set("Accept", mediaTypeV3) - req.Header.Set("User-Agent", c.UserAgent) - return req, nil -} - -// Response is a GitHub API response. This wraps the standard http.Response -// returned from GitHub and provides convenient access to things like -// pagination links. -type Response struct { - *http.Response - - // These fields provide the page values for paginating through a set of - // results. Any or all of these may be set to the zero value for - // responses that are not part of a paginated set, or for which there - // are no additional pages. - - NextPage int - PrevPage int - FirstPage int - LastPage int - - // Explicitly specify the Rate type so Rate's String() receiver doesn't - // propagate to Response. - Rate Rate -} - -// newResponse creates a new Response for the provided http.Response. -// r must not be nil. 
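The NextPage/LastPage fields on Response are what drive pagination: callers re-issue the same request with opt.Page = resp.NextPage until it comes back as zero. A sketch under the assumption that the Repositories service and its RepositoryListOptions (defined elsewhere in this package) have their usual shape:

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// allRepos follows resp.NextPage until the last page has been fetched.
func allRepos(ctx context.Context, client *github.Client, user string) ([]*github.Repository, error) {
	opt := &github.RepositoryListOptions{
		ListOptions: github.ListOptions{PerPage: 50},
	}
	var all []*github.Repository
	for {
		repos, resp, err := client.Repositories.List(ctx, user, opt)
		if err != nil {
			return nil, err
		}
		all = append(all, repos...)
		if resp.NextPage == 0 {
			break // no rel="next" link: this was the last page
		}
		opt.Page = resp.NextPage
	}
	return all, nil
}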
-func newResponse(r *http.Response) *Response { - response := &Response{Response: r} - response.populatePageValues() - response.Rate = parseRate(r) - return response -} - -// populatePageValues parses the HTTP Link response headers and populates the -// various pagination link values in the Response. -func (r *Response) populatePageValues() { - if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 { - for _, link := range strings.Split(links[0], ",") { - segments := strings.Split(strings.TrimSpace(link), ";") - - // link must at least have href and rel - if len(segments) < 2 { - continue - } - - // ensure href is properly formatted - if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") { - continue - } - - // try to pull out page parameter - url, err := url.Parse(segments[0][1 : len(segments[0])-1]) - if err != nil { - continue - } - page := url.Query().Get("page") - if page == "" { - continue - } - - for _, segment := range segments[1:] { - switch strings.TrimSpace(segment) { - case `rel="next"`: - r.NextPage, _ = strconv.Atoi(page) - case `rel="prev"`: - r.PrevPage, _ = strconv.Atoi(page) - case `rel="first"`: - r.FirstPage, _ = strconv.Atoi(page) - case `rel="last"`: - r.LastPage, _ = strconv.Atoi(page) - } - - } - } - } -} - -// parseRate parses the rate related headers. -func parseRate(r *http.Response) Rate { - var rate Rate - if limit := r.Header.Get(headerRateLimit); limit != "" { - rate.Limit, _ = strconv.Atoi(limit) - } - if remaining := r.Header.Get(headerRateRemaining); remaining != "" { - rate.Remaining, _ = strconv.Atoi(remaining) - } - if reset := r.Header.Get(headerRateReset); reset != "" { - if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { - rate.Reset = Timestamp{time.Unix(v, 0)} - } - } - return rate -} - -// Do sends an API request and returns the API response. The API response is -// JSON decoded and stored in the value pointed to by v, or returned as an -// error if an API error has occurred. If v implements the io.Writer -// interface, the raw response body will be written to v, without attempting to -// first decode it. If rate limit is exceeded and reset time is in the future, -// Do returns *RateLimitError immediately without making a network API call. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { - req = withContext(ctx, req) - - rateLimitCategory := category(req.URL.Path) - - // If we've hit rate limit, don't make further requests before Reset time. - if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil { - return &Response{ - Response: err.Response, - Rate: err.Rate, - }, err - } - - resp, err := c.client.Do(req) - if err != nil { - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - // If the error type is *url.Error, sanitize its URL before returning. - if e, ok := err.(*url.Error); ok { - if url, err := url.Parse(e.URL); err == nil { - e.URL = sanitizeURL(url).String() - return nil, e - } - } - - return nil, err - } - defer resp.Body.Close() - - response := newResponse(resp) - - c.rateMu.Lock() - c.rateLimits[rateLimitCategory] = response.Rate - c.rateMu.Unlock() - - err = CheckResponse(resp) - if err != nil { - // Special case for AcceptedErrors. 
If an AcceptedError - // has been encountered, the response's payload will be - // added to the AcceptedError and returned. - // - // Issue #1022 - aerr, ok := err.(*AcceptedError) - if ok { - b, readErr := ioutil.ReadAll(resp.Body) - if readErr != nil { - return response, readErr - } - - aerr.Raw = b - return response, aerr - } - - return response, err - } - - if v != nil { - if w, ok := v.(io.Writer); ok { - io.Copy(w, resp.Body) - } else { - decErr := json.NewDecoder(resp.Body).Decode(v) - if decErr == io.EOF { - decErr = nil // ignore EOF errors caused by empty response body - } - if decErr != nil { - err = decErr - } - } - } - - return response, err -} - -// checkRateLimitBeforeDo does not make any network calls, but uses existing knowledge from -// current client state in order to quickly check if *RateLimitError can be immediately returned -// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily. -// Otherwise it returns nil, and Client.Do should proceed normally. -func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) *RateLimitError { - c.rateMu.Lock() - rate := c.rateLimits[rateLimitCategory] - c.rateMu.Unlock() - if !rate.Reset.Time.IsZero() && rate.Remaining == 0 && time.Now().Before(rate.Reset.Time) { - // Create a fake response. - resp := &http.Response{ - Status: http.StatusText(http.StatusForbidden), - StatusCode: http.StatusForbidden, - Request: req, - Header: make(http.Header), - Body: ioutil.NopCloser(strings.NewReader("")), - } - return &RateLimitError{ - Rate: rate, - Response: resp, - Message: fmt.Sprintf("API rate limit of %v still exceeded until %v, not making remote request.", rate.Limit, rate.Reset.Time), - } - } - - return nil -} - -/* -An ErrorResponse reports one or more errors caused by an API request. - -GitHub API docs: https://developer.github.com/v3/#client-errors -*/ -type ErrorResponse struct { - Response *http.Response // HTTP response that caused this error - Message string `json:"message"` // error message - Errors []Error `json:"errors"` // more detail on individual errors - // Block is only populated on certain types of errors such as code 451. - // See https://developer.github.com/changes/2016-03-17-the-451-status-code-is-now-supported/ - // for more information. - Block *struct { - Reason string `json:"reason,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - } `json:"block,omitempty"` - // Most errors will also include a documentation_url field pointing - // to some content that might help you resolve the error, see - // https://developer.github.com/v3/#client-errors - DocumentationURL string `json:"documentation_url,omitempty"` -} - -func (r *ErrorResponse) Error() string { - return fmt.Sprintf("%v %v: %d %v %+v", - r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message, r.Errors) -} - -// TwoFactorAuthError occurs when using HTTP Basic Authentication for a user -// that has two-factor authentication enabled. The request can be reattempted -// by providing a one-time password in the request. -type TwoFactorAuthError ErrorResponse - -func (r *TwoFactorAuthError) Error() string { return (*ErrorResponse)(r).Error() } - -// RateLimitError occurs when GitHub returns 403 Forbidden response with a rate limit -// remaining value of 0, and error message starts with "API rate limit exceeded for ". 
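Do wraps API failures in the typed errors defined here and just below, so callers can branch with a type switch instead of matching message strings. A minimal sketch (the repository lookup itself is hypothetical and relies on the Repositories service defined elsewhere in the package):

package example

import (
	"context"
	"log"

	"github.com/google/go-github/github"
)

func fetchWithErrorHandling(ctx context.Context, client *github.Client) {
	_, _, err := client.Repositories.Get(ctx, "octocat", "hello-world")
	switch e := err.(type) {
	case nil:
		// success
	case *github.RateLimitError:
		log.Printf("rate limited; resets at %v", e.Rate.Reset.Time)
	case *github.AcceptedError:
		log.Print("GitHub accepted the request but is still preparing the data; retry later")
	case *github.ErrorResponse:
		log.Printf("API error: %d %s", e.Response.StatusCode, e.Message)
	default:
		log.Printf("transport error: %v", err)
	}
}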
-type RateLimitError struct { - Rate Rate // Rate specifies last known rate limit for the client - Response *http.Response // HTTP response that caused this error - Message string `json:"message"` // error message -} - -func (r *RateLimitError) Error() string { - return fmt.Sprintf("%v %v: %d %v %v", - r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message, formatRateReset(r.Rate.Reset.Time.Sub(time.Now()))) -} - -// AcceptedError occurs when GitHub returns 202 Accepted response with an -// empty body, which means a job was scheduled on the GitHub side to process -// the information needed and cache it. -// Technically, 202 Accepted is not a real error, it's just used to -// indicate that results are not ready yet, but should be available soon. -// The request can be repeated after some time. -type AcceptedError struct { - // Raw contains the response body. - Raw []byte -} - -func (*AcceptedError) Error() string { - return "job scheduled on GitHub side; try again later" -} - -// AbuseRateLimitError occurs when GitHub returns 403 Forbidden response with the -// "documentation_url" field value equal to "https://developer.github.com/v3/#abuse-rate-limits". -type AbuseRateLimitError struct { - Response *http.Response // HTTP response that caused this error - Message string `json:"message"` // error message - - // RetryAfter is provided with some abuse rate limit errors. If present, - // it is the amount of time that the client should wait before retrying. - // Otherwise, the client should try again later (after an unspecified amount of time). - RetryAfter *time.Duration -} - -func (r *AbuseRateLimitError) Error() string { - return fmt.Sprintf("%v %v: %d %v", - r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message) -} - -// sanitizeURL redacts the client_secret parameter from the URL which may be -// exposed to the user. -func sanitizeURL(uri *url.URL) *url.URL { - if uri == nil { - return nil - } - params := uri.Query() - if len(params.Get("client_secret")) > 0 { - params.Set("client_secret", "REDACTED") - uri.RawQuery = params.Encode() - } - return uri -} - -/* -An Error reports more details on an individual error in an ErrorResponse. -These are the possible validation error codes: - - missing: - resource does not exist - missing_field: - a required field on a resource has not been set - invalid: - the formatting of a field is invalid - already_exists: - another resource has the same valid as this field - custom: - some resources return this (e.g. github.User.CreateKey()), additional - information is set in the Message field of the Error - -GitHub API docs: https://developer.github.com/v3/#client-errors -*/ -type Error struct { - Resource string `json:"resource"` // resource on which the error occurred - Field string `json:"field"` // field on which the error occurred - Code string `json:"code"` // validation error code - Message string `json:"message"` // Message describing the error. Errors with Code == "custom" will always have this set. -} - -func (e *Error) Error() string { - return fmt.Sprintf("%v error caused by %v field on %v resource", - e.Code, e.Field, e.Resource) -} - -// CheckResponse checks the API response for errors, and returns them if -// present. A response is considered an error if it has a status code outside -// the 200 range or equal to 202 Accepted. -// API error responses are expected to have either no response -// body, or a JSON response body that maps to ErrorResponse. 
Any other -// response body will be silently ignored. -// -// The error type will be *RateLimitError for rate limit exceeded errors, -// *AcceptedError for 202 Accepted status codes, -// and *TwoFactorAuthError for two-factor authentication errors. -func CheckResponse(r *http.Response) error { - if r.StatusCode == http.StatusAccepted { - return &AcceptedError{} - } - if c := r.StatusCode; 200 <= c && c <= 299 { - return nil - } - errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) - if err == nil && data != nil { - json.Unmarshal(data, errorResponse) - } - switch { - case r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required"): - return (*TwoFactorAuthError)(errorResponse) - case r.StatusCode == http.StatusForbidden && r.Header.Get(headerRateRemaining) == "0" && strings.HasPrefix(errorResponse.Message, "API rate limit exceeded for "): - return &RateLimitError{ - Rate: parseRate(r), - Response: errorResponse.Response, - Message: errorResponse.Message, - } - case r.StatusCode == http.StatusForbidden && strings.HasSuffix(errorResponse.DocumentationURL, "/v3/#abuse-rate-limits"): - abuseRateLimitError := &AbuseRateLimitError{ - Response: errorResponse.Response, - Message: errorResponse.Message, - } - if v := r.Header["Retry-After"]; len(v) > 0 { - // According to GitHub support, the "Retry-After" header value will be - // an integer which represents the number of seconds that one should - // wait before resuming making requests. - retryAfterSeconds, _ := strconv.ParseInt(v[0], 10, 64) // Error handling is noop. - retryAfter := time.Duration(retryAfterSeconds) * time.Second - abuseRateLimitError.RetryAfter = &retryAfter - } - return abuseRateLimitError - default: - return errorResponse - } -} - -// parseBoolResponse determines the boolean result from a GitHub API response. -// Several GitHub API methods return boolean responses indicated by the HTTP -// status code in the response (true indicated by a 204, false indicated by a -// 404). This helper function will determine that result and hide the 404 -// error if present. Any other error will be returned through as-is. -func parseBoolResponse(err error) (bool, error) { - if err == nil { - return true, nil - } - - if err, ok := err.(*ErrorResponse); ok && err.Response.StatusCode == http.StatusNotFound { - // Simply false. In this one case, we do not pass the error through. - return false, nil - } - - // some other real error occurred - return false, err -} - -// Rate represents the rate limit for the current client. -type Rate struct { - // The number of requests per hour the client is currently limited to. - Limit int `json:"limit"` - - // The number of remaining requests the client can make this hour. - Remaining int `json:"remaining"` - - // The time at which the current rate limit will reset. - Reset Timestamp `json:"reset"` -} - -func (r Rate) String() string { - return Stringify(r) -} - -// RateLimits represents the rate limits for the current client. -type RateLimits struct { - // The rate limit for non-search API requests. Unauthenticated - // requests are limited to 60 per hour. Authenticated requests are - // limited to 5,000 per hour. - // - // GitHub API docs: https://developer.github.com/v3/#rate-limiting - Core *Rate `json:"core"` - - // The rate limit for search API requests. Unauthenticated requests - // are limited to 10 requests per minutes. Authenticated requests are - // limited to 30 per minute. 
- // - // GitHub API docs: https://developer.github.com/v3/search/#rate-limit - Search *Rate `json:"search"` -} - -func (r RateLimits) String() string { - return Stringify(r) -} - -type rateLimitCategory uint8 - -const ( - coreCategory rateLimitCategory = iota - searchCategory - - categories // An array of this length will be able to contain all rate limit categories. -) - -// category returns the rate limit category of the endpoint, determined by Request.URL.Path. -func category(path string) rateLimitCategory { - switch { - default: - return coreCategory - case strings.HasPrefix(path, "/search/"): - return searchCategory - } -} - -// RateLimits returns the rate limits for the current client. -func (c *Client) RateLimits(ctx context.Context) (*RateLimits, *Response, error) { - req, err := c.NewRequest("GET", "rate_limit", nil) - if err != nil { - return nil, nil, err - } - - response := new(struct { - Resources *RateLimits `json:"resources"` - }) - resp, err := c.Do(ctx, req, response) - if err != nil { - return nil, nil, err - } - - if response.Resources != nil { - c.rateMu.Lock() - if response.Resources.Core != nil { - c.rateLimits[coreCategory] = *response.Resources.Core - } - if response.Resources.Search != nil { - c.rateLimits[searchCategory] = *response.Resources.Search - } - c.rateMu.Unlock() - } - - return response.Resources, resp, nil -} - -/* -UnauthenticatedRateLimitedTransport allows you to make unauthenticated calls -that need to use a higher rate limit associated with your OAuth application. - - t := &github.UnauthenticatedRateLimitedTransport{ - ClientID: "your app's client ID", - ClientSecret: "your app's client secret", - } - client := github.NewClient(t.Client()) - -This will append the querystring params client_id=xxx&client_secret=yyy to all -requests. - -See https://developer.github.com/v3/#unauthenticated-rate-limited-requests for -more information. -*/ -type UnauthenticatedRateLimitedTransport struct { - // ClientID is the GitHub OAuth client ID of the current application, which - // can be found by selecting its entry in the list at - // https://github.com/settings/applications. - ClientID string - - // ClientSecret is the GitHub OAuth client secret of the current - // application. - ClientSecret string - - // Transport is the underlying HTTP transport to use when making requests. - // It will default to http.DefaultTransport if nil. - Transport http.RoundTripper -} - -// RoundTrip implements the RoundTripper interface. -func (t *UnauthenticatedRateLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.ClientID == "" { - return nil, errors.New("t.ClientID is empty") - } - if t.ClientSecret == "" { - return nil, errors.New("t.ClientSecret is empty") - } - - // To set extra querystring params, we must make a copy of the Request so - // that we don't modify the Request we were given. This is required by the - // specification of http.RoundTripper. - // - // Since we are going to modify only req.URL here, we only need a deep copy - // of req.URL. - req2 := new(http.Request) - *req2 = *req - req2.URL = new(url.URL) - *req2.URL = *req.URL - - q := req2.URL.Query() - q.Set("client_id", t.ClientID) - q.Set("client_secret", t.ClientSecret) - req2.URL.RawQuery = q.Encode() - - // Make the HTTP request. - return t.transport().RoundTrip(req2) -} - -// Client returns an *http.Client that makes requests which are subject to the -// rate limit of your OAuth application. 
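RateLimits above is useful for introspection: the /rate_limit endpoint itself does not count against the core quota, and the call refreshes the cached limits that checkRateLimitBeforeDo consults. A short usage sketch:

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func printLimits(ctx context.Context, client *github.Client) error {
	limits, _, err := client.RateLimits(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("core:   %d/%d remaining, resets %v\n",
		limits.Core.Remaining, limits.Core.Limit, limits.Core.Reset.Time)
	fmt.Printf("search: %d/%d remaining\n",
		limits.Search.Remaining, limits.Search.Limit)
	return nil
}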
-func (t *UnauthenticatedRateLimitedTransport) Client() *http.Client { - return &http.Client{Transport: t} -} - -func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper { - if t.Transport != nil { - return t.Transport - } - return http.DefaultTransport -} - -// BasicAuthTransport is an http.RoundTripper that authenticates all requests -// using HTTP Basic Authentication with the provided username and password. It -// additionally supports users who have two-factor authentication enabled on -// their GitHub account. -type BasicAuthTransport struct { - Username string // GitHub username - Password string // GitHub password - OTP string // one-time password for users with two-factor auth enabled - - // Transport is the underlying HTTP transport to use when making requests. - // It will default to http.DefaultTransport if nil. - Transport http.RoundTripper -} - -// RoundTrip implements the RoundTripper interface. -func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) { - // To set extra headers, we must make a copy of the Request so - // that we don't modify the Request we were given. This is required by the - // specification of http.RoundTripper. - // - // Since we are going to modify only req.Header here, we only need a deep copy - // of req.Header. - req2 := new(http.Request) - *req2 = *req - req2.Header = make(http.Header, len(req.Header)) - for k, s := range req.Header { - req2.Header[k] = append([]string(nil), s...) - } - - req2.SetBasicAuth(t.Username, t.Password) - if t.OTP != "" { - req2.Header.Set(headerOTP, t.OTP) - } - return t.transport().RoundTrip(req2) -} - -// Client returns an *http.Client that makes requests that are authenticated -// using HTTP Basic Authentication. -func (t *BasicAuthTransport) Client() *http.Client { - return &http.Client{Transport: t} -} - -func (t *BasicAuthTransport) transport() http.RoundTripper { - if t.Transport != nil { - return t.Transport - } - return http.DefaultTransport -} - -// formatRateReset formats d to look like "[rate reset in 2s]" or -// "[rate reset in 87m02s]" for the positive durations. And like "[rate limit was reset 87m02s ago]" -// for the negative cases. -func formatRateReset(d time.Duration) string { - isNegative := d < 0 - if isNegative { - d *= -1 - } - secondsTotal := int(0.5 + d.Seconds()) - minutes := secondsTotal / 60 - seconds := secondsTotal - minutes*60 - - var timeString string - if minutes > 0 { - timeString = fmt.Sprintf("%dm%02ds", minutes, seconds) - } else { - timeString = fmt.Sprintf("%ds", seconds) - } - - if isNegative { - return fmt.Sprintf("[rate limit was reset %v ago]", timeString) - } - return fmt.Sprintf("[rate reset in %v]", timeString) -} - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int is a helper routine that allocates a new int value -// to store v and returns a pointer to it. -func Int(v int) *int { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. 
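BasicAuthTransport above is the supported route for username/password (plus optional OTP) authentication: it clones each request, sets the Basic Auth and X-GitHub-OTP headers, and delegates to the underlying transport. A minimal sketch with placeholder credentials:

package example

import "github.com/google/go-github/github"

// newBasicAuthClient builds a client whose every request carries Basic Auth
// headers; OTP is only needed for accounts with two-factor auth enabled.
func newBasicAuthClient() *github.Client {
	tp := &github.BasicAuthTransport{
		Username: "octocat",       // placeholder credentials
		Password: "correct-horse", // placeholder credentials
		OTP:      "123456",        // one-time password, if 2FA is enabled
	}
	return github.NewClient(tp.Client())
}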
-func String(v string) *string { return &v } diff --git a/vendor/github.com/google/go-github/github/gitignore.go b/vendor/github.com/google/go-github/github/gitignore.go deleted file mode 100644 index 2f691bc323e5..000000000000 --- a/vendor/github.com/google/go-github/github/gitignore.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GitignoresService provides access to the gitignore related functions in the -// GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/gitignore/ -type GitignoresService service - -// Gitignore represents a .gitignore file as returned by the GitHub API. -type Gitignore struct { - Name *string `json:"name,omitempty"` - Source *string `json:"source,omitempty"` -} - -func (g Gitignore) String() string { - return Stringify(g) -} - -// List all available Gitignore templates. -// -// GitHub API docs: https://developer.github.com/v3/gitignore/#listing-available-templates -func (s GitignoresService) List(ctx context.Context) ([]string, *Response, error) { - req, err := s.client.NewRequest("GET", "gitignore/templates", nil) - if err != nil { - return nil, nil, err - } - - var availableTemplates []string - resp, err := s.client.Do(ctx, req, &availableTemplates) - if err != nil { - return nil, resp, err - } - - return availableTemplates, resp, nil -} - -// Get a Gitignore by name. -// -// GitHub API docs: https://developer.github.com/v3/gitignore/#get-a-single-template -func (s GitignoresService) Get(ctx context.Context, name string) (*Gitignore, *Response, error) { - u := fmt.Sprintf("gitignore/templates/%v", name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gitignore := new(Gitignore) - resp, err := s.client.Do(ctx, req, gitignore) - if err != nil { - return nil, resp, err - } - - return gitignore, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go deleted file mode 100644 index 1e0991ce4ff6..000000000000 --- a/vendor/github.com/google/go-github/github/issues.go +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" - "time" -) - -// IssuesService handles communication with the issue related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/issues/ -type IssuesService service - -// Issue represents a GitHub issue on a repository. -// -// Note: As far as the GitHub API is concerned, every pull request is an issue, -// but not every issue is a pull request. Some endpoints, events, and webhooks -// may also return pull requests via this struct. If PullRequestLinks is nil, -// this is an issue, and if PullRequestLinks is not nil, this is a pull request. -// The IsPullRequest helper method can be used to check that. 
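The GitignoresService above is one of the smallest services in the package and makes a handy end-to-end illustration: List returns the available template names and Get fetches a single template. A sketch (the "Go" template name is just an example; GetName and GetSource are assumed to come from the generated accessors file):

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func printGoTemplate(ctx context.Context, client *github.Client) error {
	names, _, err := client.Gitignores.List(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d .gitignore templates available\n", len(names))

	gi, _, err := client.Gitignores.Get(ctx, "Go")
	if err != nil {
		return err
	}
	fmt.Printf("=== %s ===\n%s", gi.GetName(), gi.GetSource())
	return nil
}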
-type Issue struct { - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Locked *bool `json:"locked,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - Labels []Label `json:"labels,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Comments *int `json:"comments,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedBy *User `json:"closed_by,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - Assignees []*User `json:"assignees,omitempty"` - NodeID *string `json:"node_id,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` - - // ActiveLockReason is populated only when LockReason is provided while locking the issue. - // Possible values are: "off-topic", "too heated", "resolved", and "spam". - ActiveLockReason *string `json:"active_lock_reason,omitempty"` -} - -func (i Issue) String() string { - return Stringify(i) -} - -// IsPullRequest reports whether the issue is also a pull request. It uses the -// method recommended by GitHub's API documentation, which is to check whether -// PullRequestLinks is non-nil. -func (i Issue) IsPullRequest() bool { - return i.PullRequestLinks != nil -} - -// IssueRequest represents a request to create/edit an issue. -// It is separate from Issue above because otherwise Labels -// and Assignee fail to serialize to the correct JSON. -type IssueRequest struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - Labels *[]string `json:"labels,omitempty"` - Assignee *string `json:"assignee,omitempty"` - State *string `json:"state,omitempty"` - Milestone *int `json:"milestone,omitempty"` - Assignees *[]string `json:"assignees,omitempty"` -} - -// IssueListOptions specifies the optional parameters to the IssuesService.List -// and IssuesService.ListByOrg methods. -type IssueListOptions struct { - // Filter specifies which issues to list. Possible values are: assigned, - // created, mentioned, subscribed, all. Default is "assigned". - Filter string `url:"filter,omitempty"` - - // State filters issues based on their state. Possible values are: open, - // closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Labels filters issues based on their label. - Labels []string `url:"labels,comma,omitempty"` - - // Sort specifies how to sort issues. Possible values are: created, updated, - // and comments. Default value is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort issues. Possible values are: asc, desc. - // Default is "desc". - Direction string `url:"direction,omitempty"` - - // Since filters issues by time. 
- Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// PullRequestLinks object is added to the Issue object when it's an issue included -// in the IssueCommentEvent webhook payload, if the webhook is fired by a comment on a PR. -type PullRequestLinks struct { - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` -} - -// List the issues for the authenticated user. If all is true, list issues -// across all the user's visible repositories including owned, member, and -// organization repositories; if false, list only owned and member -// repositories. -// -// GitHub API docs: https://developer.github.com/v3/issues/#list-issues -func (s *IssuesService) List(ctx context.Context, all bool, opt *IssueListOptions) ([]*Issue, *Response, error) { - var u string - if all { - u = "issues" - } else { - u = "user/issues" - } - return s.listIssues(ctx, u, opt) -} - -// ListByOrg fetches the issues in the specified organization for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/issues/#list-issues -func (s *IssuesService) ListByOrg(ctx context.Context, org string, opt *IssueListOptions) ([]*Issue, *Response, error) { - u := fmt.Sprintf("orgs/%v/issues", org) - return s.listIssues(ctx, u, opt) -} - -func (s *IssuesService) listIssues(ctx context.Context, u string, opt *IssueListOptions) ([]*Issue, *Response, error) { - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeLabelDescriptionSearchPreview, mediaTypeLockReasonPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var issues []*Issue - resp, err := s.client.Do(ctx, req, &issues) - if err != nil { - return nil, resp, err - } - - return issues, resp, nil -} - -// IssueListByRepoOptions specifies the optional parameters to the -// IssuesService.ListByRepo method. -type IssueListByRepoOptions struct { - // Milestone limits issues for the specified milestone. Possible values are - // a milestone number, "none" for issues with no milestone, "*" for issues - // with any milestone. - Milestone string `url:"milestone,omitempty"` - - // State filters issues based on their state. Possible values are: open, - // closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Assignee filters issues based on their assignee. Possible values are a - // user name, "none" for issues that are not assigned, "*" for issues with - // any assigned user. - Assignee string `url:"assignee,omitempty"` - - // Creator filters issues based on their creator. - Creator string `url:"creator,omitempty"` - - // Mentioned filters issues to those mentioned a specific user. - Mentioned string `url:"mentioned,omitempty"` - - // Labels filters issues based on their label. - Labels []string `url:"labels,omitempty,comma"` - - // Sort specifies how to sort issues. Possible values are: created, updated, - // and comments. Default value is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort issues. Possible values are: asc, desc. - // Default is "desc". - Direction string `url:"direction,omitempty"` - - // Since filters issues by time. 
- Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListByRepo lists the issues for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/#list-issues-for-a-repository -func (s *IssuesService) ListByRepo(ctx context.Context, owner string, repo string, opt *IssueListByRepoOptions) ([]*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeLabelDescriptionSearchPreview, mediaTypeIntegrationPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var issues []*Issue - resp, err := s.client.Do(ctx, req, &issues) - if err != nil { - return nil, resp, err - } - - return issues, resp, nil -} - -// Get a single issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/#get-a-single-issue -func (s *IssuesService) Get(ctx context.Context, owner string, repo string, number int) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeLabelDescriptionSearchPreview, mediaTypeLockReasonPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - issue := new(Issue) - resp, err := s.client.Do(ctx, req, issue) - if err != nil { - return nil, resp, err - } - - return issue, resp, nil -} - -// Create a new issue on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/#create-an-issue -func (s *IssuesService) Create(ctx context.Context, owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) - req, err := s.client.NewRequest("POST", u, issue) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - i := new(Issue) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// Edit an issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/#edit-an-issue -func (s *IssuesService) Edit(ctx context.Context, owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, issue) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - i := new(Issue) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// LockIssueOptions specifies the optional parameters to the -// IssuesService.Lock method. -type LockIssueOptions struct { - // LockReason specifies the reason to lock this issue. - // Providing a lock reason can help make it clearer to contributors why an issue - // was locked. Possible values are: "off-topic", "too heated", "resolved", and "spam". 
- LockReason string `json:"lock_reason,omitempty"` -} - -// Lock an issue's conversation. -// -// GitHub API docs: https://developer.github.com/v3/issues/#lock-an-issue -func (s *IssuesService) Lock(ctx context.Context, owner string, repo string, number int, opt *LockIssueOptions) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, err - } - - if opt != nil { - req.Header.Set("Accept", mediaTypeLockReasonPreview) - } - - return s.client.Do(ctx, req, nil) -} - -// Unlock an issue's conversation. -// -// GitHub API docs: https://developer.github.com/v3/issues/#unlock-an-issue -func (s *IssuesService) Unlock(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/issues_assignees.go b/vendor/github.com/google/go-github/github/issues_assignees.go deleted file mode 100644 index 9cb366f50a32..000000000000 --- a/vendor/github.com/google/go-github/github/issues_assignees.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListAssignees fetches all available assignees (owners and collaborators) to -// which issues may be assigned. -// -// GitHub API docs: https://developer.github.com/v3/issues/assignees/#list-assignees -func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - var assignees []*User - resp, err := s.client.Do(ctx, req, &assignees) - if err != nil { - return nil, resp, err - } - - return assignees, resp, nil -} - -// IsAssignee checks if a user is an assignee for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/assignees/#check-assignee -func (s *IssuesService) IsAssignee(ctx context.Context, owner, repo, user string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(ctx, req, nil) - assignee, err := parseBoolResponse(err) - return assignee, resp, err -} - -// AddAssignees adds the provided GitHub users as assignees to the issue. 
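The IssueRequest, Create, and Lock pieces deleted above combine naturally into a create-then-lock flow. The sketch below is illustrative only: the credentials, owner/repo, labels, and lock reason are placeholders, and it reuses the BasicAuthTransport defined earlier in this package for authentication.

    package example

    import (
        "context"

        "github.com/google/go-github/github"
    )

    // fileAndLockIssue opens an issue and then locks its conversation with a
    // reason. Credentials, owner, repo, labels, and the lock reason are all
    // placeholder values.
    func fileAndLockIssue(ctx context.Context) error {
        tp := &github.BasicAuthTransport{Username: "octocat", Password: "secret"}
        client := github.NewClient(tp.Client())

        req := &github.IssueRequest{
            Title:  github.String("Example issue"),
            Body:   github.String("Opened by a script."),
            Labels: &[]string{"bug"},
        }
        issue, _, err := client.Issues.Create(ctx, "octocat", "hello-world", req)
        if err != nil {
            return err
        }

        opt := &github.LockIssueOptions{LockReason: "resolved"}
        _, err = client.Issues.Lock(ctx, "octocat", "hello-world", *issue.Number, opt)
        return err
    }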
-// -// GitHub API docs: https://developer.github.com/v3/issues/assignees/#add-assignees-to-an-issue -func (s *IssuesService) AddAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { - users := &struct { - Assignees []string `json:"assignees,omitempty"` - }{Assignees: assignees} - u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number) - req, err := s.client.NewRequest("POST", u, users) - if err != nil { - return nil, nil, err - } - - issue := &Issue{} - resp, err := s.client.Do(ctx, req, issue) - return issue, resp, err -} - -// RemoveAssignees removes the provided GitHub users as assignees from the issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/assignees/#remove-assignees-from-an-issue -func (s *IssuesService) RemoveAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { - users := &struct { - Assignees []string `json:"assignees,omitempty"` - }{Assignees: assignees} - u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, users) - if err != nil { - return nil, nil, err - } - - issue := &Issue{} - resp, err := s.client.Do(ctx, req, issue) - return issue, resp, err -} diff --git a/vendor/github.com/google/go-github/github/issues_comments.go b/vendor/github.com/google/go-github/github/issues_comments.go deleted file mode 100644 index ab68afe2fa47..000000000000 --- a/vendor/github.com/google/go-github/github/issues_comments.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// IssueComment represents a comment left on an issue. -type IssueComment struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - // AuthorAssociation is the comment author's relationship to the issue's repository. - // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". - AuthorAssociation *string `json:"author_association,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` -} - -func (i IssueComment) String() string { - return Stringify(i) -} - -// IssueListCommentsOptions specifies the optional parameters to the -// IssuesService.ListComments method. -type IssueListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. - Sort string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction string `url:"direction,omitempty"` - - // Since filters comments by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified issue. Specifying an issue -// number of 0 will return all comments on all issues for the repository. 
-// -// GitHub API docs: https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue -func (s *IssuesService) ListComments(ctx context.Context, owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*IssueComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment fetches the specified issue comment. -// -// GitHub API docs: https://developer.github.com/v3/issues/comments/#get-a-single-comment -func (s *IssuesService) GetComment(ctx context.Context, owner string, repo string, commentID int64) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - comment := new(IssueComment) - resp, err := s.client.Do(ctx, req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, nil -} - -// CreateComment creates a new comment on the specified issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/comments/#create-a-comment -func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment updates an issue comment. -// A non-nil comment.Body must be provided. Other comment fields should be left nil. -// -// GitHub API docs: https://developer.github.com/v3/issues/comments/#edit-a-comment -func (s *IssuesService) EditComment(ctx context.Context, owner string, repo string, commentID int64, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes an issue comment. 
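A short sketch of the comment-creation path removed above; only Body is set on the IssueComment, matching the guidance in EditComment's doc comment that other fields should be left nil. Owner, repo, and the issue number are placeholders, and the client is assumed to be authenticated.

    package example

    import (
        "context"

        "github.com/google/go-github/github"
    )

    // commentOnIssue adds a comment to an existing issue; only Body is set, as
    // recommended for comment edits as well. Owner, repo, and issue number are
    // placeholders.
    func commentOnIssue(ctx context.Context, client *github.Client) error {
        comment := &github.IssueComment{
            Body: github.String("Thanks for the report, looking into it."),
        }
        _, _, err := client.Issues.CreateComment(ctx, "octocat", "hello-world", 42, comment)
        return err
    }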
-// -// GitHub API docs: https://developer.github.com/v3/issues/comments/#delete-a-comment -func (s *IssuesService) DeleteComment(ctx context.Context, owner string, repo string, commentID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/issues_events.go b/vendor/github.com/google/go-github/github/issues_events.go deleted file mode 100644 index 6a43f1062d79..000000000000 --- a/vendor/github.com/google/go-github/github/issues_events.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" - "time" -) - -// IssueEvent represents an event that occurred around an Issue or Pull Request. -type IssueEvent struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - - // The User that generated this event. - Actor *User `json:"actor,omitempty"` - - // Event identifies the actual type of Event that occurred. Possible - // values are: - // - // closed - // The Actor closed the issue. - // If the issue was closed by commit message, CommitID holds the SHA1 hash of the commit. - // - // merged - // The Actor merged into master a branch containing a commit mentioning the issue. - // CommitID holds the SHA1 of the merge commit. - // - // referenced - // The Actor committed to master a commit mentioning the issue in its commit message. - // CommitID holds the SHA1 of the commit. - // - // reopened, unlocked - // The Actor did that to the issue. - // - // locked - // The Actor locked the issue. - // LockReason holds the reason of locking the issue (if provided while locking). - // - // renamed - // The Actor changed the issue title from Rename.From to Rename.To. - // - // mentioned - // Someone unspecified @mentioned the Actor [sic] in an issue comment body. - // - // assigned, unassigned - // The Assigner assigned the issue to or removed the assignment from the Assignee. - // - // labeled, unlabeled - // The Actor added or removed the Label from the issue. - // - // milestoned, demilestoned - // The Actor added or removed the issue from the Milestone. - // - // subscribed, unsubscribed - // The Actor subscribed to or unsubscribed from notifications for an issue. - // - // head_ref_deleted, head_ref_restored - // The pull request’s branch was deleted or restored. - // - Event *string `json:"event,omitempty"` - - CreatedAt *time.Time `json:"created_at,omitempty"` - Issue *Issue `json:"issue,omitempty"` - - // Only present on certain events; see above. - Assignee *User `json:"assignee,omitempty"` - Assigner *User `json:"assigner,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - Label *Label `json:"label,omitempty"` - Rename *Rename `json:"rename,omitempty"` - LockReason *string `json:"lock_reason,omitempty"` - ProjectCard *ProjectCard `json:"project_card,omitempty"` -} - -// ListIssueEvents lists events for the specified issue. 
-// -// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-an-issue -func (s *IssuesService) ListIssueEvents(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - acceptHeaders := []string{mediaTypeLockReasonPreview, mediaTypeProjectCardDetailsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListRepositoryEvents lists events for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-a-repository -func (s *IssuesService) ListRepositoryEvents(ctx context.Context, owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// GetEvent returns the specified issue event. -// -// GitHub API docs: https://developer.github.com/v3/issues/events/#get-a-single-event -func (s *IssuesService) GetEvent(ctx context.Context, owner, repo string, id int64) (*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - event := new(IssueEvent) - resp, err := s.client.Do(ctx, req, event) - if err != nil { - return nil, resp, err - } - - return event, resp, nil -} - -// Rename contains details for 'renamed' events. -type Rename struct { - From *string `json:"from,omitempty"` - To *string `json:"to,omitempty"` -} - -func (r Rename) String() string { - return Stringify(r) -} diff --git a/vendor/github.com/google/go-github/github/issues_labels.go b/vendor/github.com/google/go-github/github/issues_labels.go deleted file mode 100644 index adcbe06834a5..000000000000 --- a/vendor/github.com/google/go-github/github/issues_labels.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Label represents a GitHub label on an Issue -type Label struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Color *string `json:"color,omitempty"` - Description *string `json:"description,omitempty"` - Default *bool `json:"default,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (l Label) String() string { - return Stringify(l) -} - -// ListLabels lists all labels for a repository. 
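The event-listing methods above can be exercised as in the sketch below; passing a nil ListOptions falls back to the API's default paging, and the owner/repo/number values are placeholders.

    package example

    import (
        "context"
        "fmt"

        "github.com/google/go-github/github"
    )

    // printIssueEvents lists the events recorded for a single issue and prints
    // each event type with its timestamp. A nil ListOptions uses the API's
    // default paging; owner, repo, and issue number are placeholders.
    func printIssueEvents(ctx context.Context, client *github.Client) error {
        events, _, err := client.Issues.ListIssueEvents(ctx, "octocat", "hello-world", 42, nil)
        if err != nil {
            return err
        }
        for _, ev := range events {
            if ev.Event != nil && ev.CreatedAt != nil {
                fmt.Printf("%s at %s\n", *ev.Event, ev.CreatedAt)
            }
        }
        return nil
    }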
-// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository -func (s *IssuesService) ListLabels(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} - -// GetLabel gets a single label. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#get-a-single-label -func (s *IssuesService) GetLabel(ctx context.Context, owner string, repo string, name string) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - label := new(Label) - resp, err := s.client.Do(ctx, req, label) - if err != nil { - return nil, resp, err - } - - return label, resp, nil -} - -// CreateLabel creates a new label on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#create-a-label -func (s *IssuesService) CreateLabel(ctx context.Context, owner string, repo string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - req, err := s.client.NewRequest("POST", u, label) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - l := new(Label) - resp, err := s.client.Do(ctx, req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// EditLabel edits a label. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#update-a-label -func (s *IssuesService) EditLabel(ctx context.Context, owner string, repo string, name string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("PATCH", u, label) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - l := new(Label) - resp, err := s.client.Do(ctx, req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// DeleteLabel deletes a label. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#delete-a-label -func (s *IssuesService) DeleteLabel(ctx context.Context, owner string, repo string, name string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// ListLabelsByIssue lists all labels for an issue. 
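Label creation (above) and AddLabelsToIssue (defined just below) are typically used together; in this sketch the label name, colour, owner, repo, and issue number are placeholders.

    package example

    import (
        "context"

        "github.com/google/go-github/github"
    )

    // ensureBugLabel creates a "bug" label and attaches it to an issue. The
    // label colour, owner, repo, and issue number are placeholders.
    func ensureBugLabel(ctx context.Context, client *github.Client) error {
        label := &github.Label{
            Name:        github.String("bug"),
            Color:       github.String("d73a4a"),
            Description: github.String("Something isn't working"),
        }
        if _, _, err := client.Issues.CreateLabel(ctx, "octocat", "hello-world", label); err != nil {
            return err
        }
        _, _, err := client.Issues.AddLabelsToIssue(ctx, "octocat", "hello-world", 42, []string{"bug"})
        return err
    }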
-// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#list-labels-on-an-issue -func (s *IssuesService) ListLabelsByIssue(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} - -// AddLabelsToIssue adds labels to an issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue -func (s *IssuesService) AddLabelsToIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("POST", u, labels) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - var l []*Label - resp, err := s.client.Do(ctx, req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// RemoveLabelForIssue removes a label for an issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue -func (s *IssuesService) RemoveLabelForIssue(ctx context.Context, owner string, repo string, number int, label string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - return s.client.Do(ctx, req, nil) -} - -// ReplaceLabelsForIssue replaces all labels for an issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#replace-all-labels-for-an-issue -func (s *IssuesService) ReplaceLabelsForIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("PUT", u, labels) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - var l []*Label - resp, err := s.client.Do(ctx, req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// RemoveLabelsForIssue removes all labels for an issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#remove-all-labels-from-an-issue -func (s *IssuesService) RemoveLabelsForIssue(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - return s.client.Do(ctx, req, nil) -} - -// ListLabelsForMilestone lists labels for every issue in a milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/labels/#get-labels-for-every-issue-in-a-milestone -func (s *IssuesService) ListLabelsForMilestone(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/issues_milestones.go b/vendor/github.com/google/go-github/github/issues_milestones.go deleted file mode 100644 index ffe9aae14ca5..000000000000 --- a/vendor/github.com/google/go-github/github/issues_milestones.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// Milestone represents a GitHub repository milestone. -type Milestone struct { - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` - ClosedIssues *int `json:"closed_issues,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - DueOn *time.Time `json:"due_on,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (m Milestone) String() string { - return Stringify(m) -} - -// MilestoneListOptions specifies the optional parameters to the -// IssuesService.ListMilestones method. -type MilestoneListOptions struct { - // State filters milestones based on their state. Possible values are: - // open, closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Sort specifies how to sort milestones. Possible values are: due_on, completeness. - // Default value is "due_on". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort milestones. Possible values are: asc, desc. - // Default is "asc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListMilestones lists all milestones for a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#list-milestones-for-a-repository -func (s *IssuesService) ListMilestones(ctx context.Context, owner string, repo string, opt *MilestoneListOptions) ([]*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var milestones []*Milestone - resp, err := s.client.Do(ctx, req, &milestones) - if err != nil { - return nil, resp, err - } - - return milestones, resp, nil -} - -// GetMilestone gets a single milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#get-a-single-milestone -func (s *IssuesService) GetMilestone(ctx context.Context, owner string, repo string, number int) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - milestone := new(Milestone) - resp, err := s.client.Do(ctx, req, milestone) - if err != nil { - return nil, resp, err - } - - return milestone, resp, nil -} - -// CreateMilestone creates a new milestone on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#create-a-milestone -func (s *IssuesService) CreateMilestone(ctx context.Context, owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - req, err := s.client.NewRequest("POST", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// EditMilestone edits a milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#update-a-milestone -func (s *IssuesService) EditMilestone(ctx context.Context, owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteMilestone deletes a milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#delete-a-milestone -func (s *IssuesService) DeleteMilestone(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/issues_timeline.go b/vendor/github.com/google/go-github/github/issues_timeline.go deleted file mode 100644 index 9cfda8320269..000000000000 --- a/vendor/github.com/google/go-github/github/issues_timeline.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// Timeline represents an event that occurred around an Issue or Pull Request. 
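A milestone sketch using the struct and methods removed above; the title, description, owner/repo, and one-month due date are placeholders. Because every field is a pointer, the package's String helper is used to distinguish unset fields from empty strings.

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/google/go-github/github"
    )

    // createReleaseMilestone creates a milestone due in one month, then lists
    // the repository's open milestones sorted by due date. Title, description,
    // owner, and repo are placeholders.
    func createReleaseMilestone(ctx context.Context, client *github.Client) error {
        due := time.Now().AddDate(0, 1, 0)
        m := &github.Milestone{
            Title:       github.String("v1.0"),
            Description: github.String("First stable release"),
            DueOn:       &due,
        }
        if _, _, err := client.Issues.CreateMilestone(ctx, "octocat", "hello-world", m); err != nil {
            return err
        }

        opt := &github.MilestoneListOptions{State: "open", Sort: "due_on"}
        milestones, _, err := client.Issues.ListMilestones(ctx, "octocat", "hello-world", opt)
        if err != nil {
            return err
        }
        fmt.Printf("open milestones: %d\n", len(milestones))
        return nil
    }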
-// -// It is similar to an IssueEvent but may contain more information. -// GitHub API docs: https://developer.github.com/v3/issues/timeline/ -type Timeline struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - CommitURL *string `json:"commit_url,omitempty"` - - // The User object that generated the event. - Actor *User `json:"actor,omitempty"` - - // Event identifies the actual type of Event that occurred. Possible values - // are: - // - // assigned - // The issue was assigned to the assignee. - // - // closed - // The issue was closed by the actor. When the commit_id is present, it - // identifies the commit that closed the issue using "closes / fixes #NN" - // syntax. - // - // commented - // A comment was added to the issue. - // - // committed - // A commit was added to the pull request's 'HEAD' branch. Only provided - // for pull requests. - // - // cross-referenced - // The issue was referenced from another issue. The 'source' attribute - // contains the 'id', 'actor', and 'url' of the reference's source. - // - // demilestoned - // The issue was removed from a milestone. - // - // head_ref_deleted - // The pull request's branch was deleted. - // - // head_ref_restored - // The pull request's branch was restored. - // - // labeled - // A label was added to the issue. - // - // locked - // The issue was locked by the actor. - // - // mentioned - // The actor was @mentioned in an issue body. - // - // merged - // The issue was merged by the actor. The 'commit_id' attribute is the - // SHA1 of the HEAD commit that was merged. - // - // milestoned - // The issue was added to a milestone. - // - // referenced - // The issue was referenced from a commit message. The 'commit_id' - // attribute is the commit SHA1 of where that happened. - // - // renamed - // The issue title was changed. - // - // reopened - // The issue was reopened by the actor. - // - // subscribed - // The actor subscribed to receive notifications for an issue. - // - // unassigned - // The assignee was unassigned from the issue. - // - // unlabeled - // A label was removed from the issue. - // - // unlocked - // The issue was unlocked by the actor. - // - // unsubscribed - // The actor unsubscribed to stop receiving notifications for an issue. - // - Event *string `json:"event,omitempty"` - - // The string SHA of a commit that referenced this Issue or Pull Request. - CommitID *string `json:"commit_id,omitempty"` - // The timestamp indicating when the event occurred. - CreatedAt *time.Time `json:"created_at,omitempty"` - // The Label object including `name` and `color` attributes. Only provided for - // 'labeled' and 'unlabeled' events. - Label *Label `json:"label,omitempty"` - // The User object which was assigned to (or unassigned from) this Issue or - // Pull Request. Only provided for 'assigned' and 'unassigned' events. - Assignee *User `json:"assignee,omitempty"` - // The Milestone object including a 'title' attribute. - // Only provided for 'milestoned' and 'demilestoned' events. - Milestone *Milestone `json:"milestone,omitempty"` - // The 'id', 'actor', and 'url' for the source of a reference from another issue. - // Only provided for 'cross-referenced' events. - Source *Source `json:"source,omitempty"` - // An object containing rename details including 'from' and 'to' attributes. - // Only provided for 'renamed' events. - Rename *Rename `json:"rename,omitempty"` -} - -// Source represents a reference's source. 
-type Source struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Actor *User `json:"actor,omitempty"` -} - -// ListIssueTimeline lists events for the specified issue. -// -// GitHub API docs: https://developer.github.com/v3/issues/timeline/#list-events-for-an-issue -func (s *IssuesService) ListIssueTimeline(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*Timeline, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTimelinePreview) - - var events []*Timeline - resp, err := s.client.Do(ctx, req, &events) - return events, resp, err -} diff --git a/vendor/github.com/google/go-github/github/licenses.go b/vendor/github.com/google/go-github/github/licenses.go deleted file mode 100644 index 1176d3a8bf7b..000000000000 --- a/vendor/github.com/google/go-github/github/licenses.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// LicensesService handles communication with the license related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/licenses/ -type LicensesService service - -// RepositoryLicense represents the license for a repository. -type RepositoryLicense struct { - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - - SHA *string `json:"sha,omitempty"` - Size *int `json:"size,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` - Type *string `json:"type,omitempty"` - Content *string `json:"content,omitempty"` - Encoding *string `json:"encoding,omitempty"` - License *License `json:"license,omitempty"` -} - -func (l RepositoryLicense) String() string { - return Stringify(l) -} - -// License represents an open source license. -type License struct { - Key *string `json:"key,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - - SPDXID *string `json:"spdx_id,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Featured *bool `json:"featured,omitempty"` - Description *string `json:"description,omitempty"` - Implementation *string `json:"implementation,omitempty"` - Permissions *[]string `json:"permissions,omitempty"` - Conditions *[]string `json:"conditions,omitempty"` - Limitations *[]string `json:"limitations,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (l License) String() string { - return Stringify(l) -} - -// List popular open source licenses. -// -// GitHub API docs: https://developer.github.com/v3/licenses/#list-all-licenses -func (s *LicensesService) List(ctx context.Context) ([]*License, *Response, error) { - req, err := s.client.NewRequest("GET", "licenses", nil) - if err != nil { - return nil, nil, err - } - - var licenses []*License - resp, err := s.client.Do(ctx, req, &licenses) - if err != nil { - return nil, resp, err - } - - return licenses, resp, nil -} - -// Get extended metadata for one license. 
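The LicensesService above can be exercised without authentication, since license metadata is public; the sketch below simply prints each license's SPDX ID and name.

    package example

    import (
        "context"
        "fmt"

        "github.com/google/go-github/github"
    )

    // printLicenses lists the popular open source licenses GitHub knows about
    // and prints each SPDX ID alongside its full name.
    func printLicenses(ctx context.Context) error {
        client := github.NewClient(nil)
        licenses, _, err := client.Licenses.List(ctx)
        if err != nil {
            return err
        }
        for _, l := range licenses {
            if l.SPDXID != nil && l.Name != nil {
                fmt.Printf("%s\t%s\n", *l.SPDXID, *l.Name)
            }
        }
        return nil
    }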
-// -// GitHub API docs: https://developer.github.com/v3/licenses/#get-an-individual-license -func (s *LicensesService) Get(ctx context.Context, licenseName string) (*License, *Response, error) { - u := fmt.Sprintf("licenses/%s", licenseName) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - license := new(License) - resp, err := s.client.Do(ctx, req, license) - if err != nil { - return nil, resp, err - } - - return license, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go deleted file mode 100644 index 75bc770512d5..000000000000 --- a/vendor/github.com/google/go-github/github/messages.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides functions for validating payloads from GitHub Webhooks. -// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github - -package github - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "hash" - "io/ioutil" - "net/http" - "net/url" - "strings" -) - -const ( - // sha1Prefix is the prefix used by GitHub before the HMAC hexdigest. - sha1Prefix = "sha1" - // sha256Prefix and sha512Prefix are provided for future compatibility. - sha256Prefix = "sha256" - sha512Prefix = "sha512" - // signatureHeader is the GitHub header key used to pass the HMAC hexdigest. - signatureHeader = "X-Hub-Signature" - // eventTypeHeader is the GitHub header key used to pass the event type. - eventTypeHeader = "X-Github-Event" - // deliveryIDHeader is the GitHub header key used to pass the unique ID for the webhook event. - deliveryIDHeader = "X-Github-Delivery" -) - -var ( - // eventTypeMapping maps webhooks types to their corresponding go-github struct types. - eventTypeMapping = map[string]string{ - "check_run": "CheckRunEvent", - "check_suite": "CheckSuiteEvent", - "commit_comment": "CommitCommentEvent", - "create": "CreateEvent", - "delete": "DeleteEvent", - "deployment": "DeploymentEvent", - "deployment_status": "DeploymentStatusEvent", - "fork": "ForkEvent", - "gollum": "GollumEvent", - "installation": "InstallationEvent", - "installation_repositories": "InstallationRepositoriesEvent", - "issue_comment": "IssueCommentEvent", - "issues": "IssuesEvent", - "label": "LabelEvent", - "marketplace_purchase": "MarketplacePurchaseEvent", - "member": "MemberEvent", - "membership": "MembershipEvent", - "milestone": "MilestoneEvent", - "organization": "OrganizationEvent", - "org_block": "OrgBlockEvent", - "page_build": "PageBuildEvent", - "ping": "PingEvent", - "project": "ProjectEvent", - "project_card": "ProjectCardEvent", - "project_column": "ProjectColumnEvent", - "public": "PublicEvent", - "pull_request_review": "PullRequestReviewEvent", - "pull_request_review_comment": "PullRequestReviewCommentEvent", - "pull_request": "PullRequestEvent", - "push": "PushEvent", - "repository": "RepositoryEvent", - "repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent", - "release": "ReleaseEvent", - "status": "StatusEvent", - "team": "TeamEvent", - "team_add": "TeamAddEvent", - "watch": "WatchEvent", - } -) - -// genMAC generates the HMAC signature for a message provided the secret key -// and hashFunc. 
-func genMAC(message, key []byte, hashFunc func() hash.Hash) []byte { - mac := hmac.New(hashFunc, key) - mac.Write(message) - return mac.Sum(nil) -} - -// checkMAC reports whether messageMAC is a valid HMAC tag for message. -func checkMAC(message, messageMAC, key []byte, hashFunc func() hash.Hash) bool { - expectedMAC := genMAC(message, key, hashFunc) - return hmac.Equal(messageMAC, expectedMAC) -} - -// messageMAC returns the hex-decoded HMAC tag from the signature and its -// corresponding hash function. -func messageMAC(signature string) ([]byte, func() hash.Hash, error) { - if signature == "" { - return nil, nil, errors.New("missing signature") - } - sigParts := strings.SplitN(signature, "=", 2) - if len(sigParts) != 2 { - return nil, nil, fmt.Errorf("error parsing signature %q", signature) - } - - var hashFunc func() hash.Hash - switch sigParts[0] { - case sha1Prefix: - hashFunc = sha1.New - case sha256Prefix: - hashFunc = sha256.New - case sha512Prefix: - hashFunc = sha512.New - default: - return nil, nil, fmt.Errorf("unknown hash type prefix: %q", sigParts[0]) - } - - buf, err := hex.DecodeString(sigParts[1]) - if err != nil { - return nil, nil, fmt.Errorf("error decoding signature %q: %v", signature, err) - } - return buf, hashFunc, nil -} - -// ValidatePayload validates an incoming GitHub Webhook event request -// and returns the (JSON) payload. -// The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded". -// If the Content-Type is neither then an error is returned. -// secretKey is the GitHub Webhook secret message. -// -// Example usage: -// -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// // Process payload... -// } -// -func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err error) { - var body []byte // Raw body that GitHub uses to calculate the signature. - - switch ct := r.Header.Get("Content-Type"); ct { - case "application/json": - var err error - if body, err = ioutil.ReadAll(r.Body); err != nil { - return nil, err - } - - // If the content type is application/json, - // the JSON payload is just the original body. - payload = body - - case "application/x-www-form-urlencoded": - // payloadFormParam is the name of the form parameter that the JSON payload - // will be in if a webhook has its content type set to application/x-www-form-urlencoded. - const payloadFormParam = "payload" - - var err error - if body, err = ioutil.ReadAll(r.Body); err != nil { - return nil, err - } - - // If the content type is application/x-www-form-urlencoded, - // the JSON payload will be under the "payload" form param. - form, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - payload = []byte(form.Get(payloadFormParam)) - - default: - return nil, fmt.Errorf("Webhook request has unsupported Content-Type %q", ct) - } - - sig := r.Header.Get(signatureHeader) - if err := ValidateSignature(sig, body, secretKey); err != nil { - return nil, err - } - return payload, nil -} - -// ValidateSignature validates the signature for the given payload. -// signature is the GitHub hash signature delivered in the X-Hub-Signature header. -// payload is the JSON payload sent by GitHub Webhooks. -// secretKey is the GitHub Webhook secret message. 
-// -// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github -func ValidateSignature(signature string, payload, secretKey []byte) error { - messageMAC, hashFunc, err := messageMAC(signature) - if err != nil { - return err - } - if !checkMAC(payload, messageMAC, secretKey, hashFunc) { - return errors.New("payload signature check failed") - } - return nil -} - -// WebHookType returns the event type of webhook request r. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#webhook-headers -func WebHookType(r *http.Request) string { - return r.Header.Get(eventTypeHeader) -} - -// DeliveryID returns the unique delivery ID of webhook request r. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#webhook-headers -func DeliveryID(r *http.Request) string { - return r.Header.Get(deliveryIDHeader) -} - -// ParseWebHook parses the event payload. For recognized event types, a -// value of the corresponding struct type will be returned (as returned -// by Event.ParsePayload()). An error will be returned for unrecognized event -// types. -// -// Example usage: -// -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// event, err := github.ParseWebHook(github.WebHookType(r), payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *github.CommitCommentEvent: -// processCommitCommentEvent(event) -// case *github.CreateEvent: -// processCreateEvent(event) -// ... -// } -// } -// -func ParseWebHook(messageType string, payload []byte) (interface{}, error) { - eventType, ok := eventTypeMapping[messageType] - if !ok { - return nil, fmt.Errorf("unknown X-Github-Event in message: %v", messageType) - } - - event := Event{ - Type: &eventType, - RawPayload: (*json.RawMessage)(&payload), - } - return event.ParsePayload() -} diff --git a/vendor/github.com/google/go-github/github/migrations.go b/vendor/github.com/google/go-github/github/migrations.go deleted file mode 100644 index 90cc1fae85f0..000000000000 --- a/vendor/github.com/google/go-github/github/migrations.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" - "net/http" - "strings" -) - -// MigrationService provides access to the migration related functions -// in the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/migration/ -type MigrationService service - -// Migration represents a GitHub migration (archival). -type Migration struct { - ID *int64 `json:"id,omitempty"` - GUID *string `json:"guid,omitempty"` - // State is the current state of a migration. - // Possible values are: - // "pending" which means the migration hasn't started yet, - // "exporting" which means the migration is in progress, - // "exported" which means the migration finished successfully, or - // "failed" which means the migration failed. - State *string `json:"state,omitempty"` - // LockRepositories indicates whether repositories are locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). 
- ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` - URL *string `json:"url,omitempty"` - CreatedAt *string `json:"created_at,omitempty"` - UpdatedAt *string `json:"updated_at,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -func (m Migration) String() string { - return Stringify(m) -} - -// MigrationOptions specifies the optional parameters to Migration methods. -type MigrationOptions struct { - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories bool - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments bool -} - -// startMigration represents the body of a StartMigration request. -type startMigration struct { - // Repositories is a slice of repository names to migrate. - Repositories []string `json:"repositories,omitempty"` - - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` -} - -// StartMigration starts the generation of a migration archive. -// repos is a slice of repository names to migrate. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#start-a-migration -func (s *MigrationService) StartMigration(ctx context.Context, org string, repos []string, opt *MigrationOptions) (*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations", org) - - body := &startMigration{Repositories: repos} - if opt != nil { - body.LockRepositories = Bool(opt.LockRepositories) - body.ExcludeAttachments = Bool(opt.ExcludeAttachments) - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &Migration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListMigrations lists the most recent migrations. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-a-list-of-migrations -func (s *MigrationService) ListMigrations(ctx context.Context, org string) ([]*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - var m []*Migration - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// MigrationStatus gets the status of a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-the-status-of-a-migration -func (s *MigrationService) MigrationStatus(ctx context.Context, org string, id int64) (*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v", org, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &Migration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// MigrationArchiveURL fetches a migration archive URL. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#download-a-migration-archive -func (s *MigrationService) MigrationArchiveURL(ctx context.Context, org string, id int64) (url string, err error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - s.client.clientMu.Lock() - defer s.client.clientMu.Unlock() - - // Disable the redirect mechanism because AWS fails if the GitHub auth token is provided. - var loc string - saveRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return errors.New("disable redirect") - } - defer func() { s.client.client.CheckRedirect = saveRedirect }() - - _, err = s.client.Do(ctx, req, nil) // expect error from disable redirect - if err == nil { - return "", errors.New("expected redirect, none provided") - } - if !strings.Contains(err.Error(), "disable redirect") { - return "", err - } - return loc, nil -} - -// DeleteMigration deletes a previous migration archive. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#delete-a-migration-archive -func (s *MigrationService) DeleteMigration(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnlockRepo unlocks a repository that was locked for migration. -// id is the migration ID. -// You should unlock each migrated repository and delete them when the migration -// is complete and you no longer need the source data. -// -// GitHub API docs: https://developer.github.com/v3/migration/migrations/#unlock-a-repository -func (s *MigrationService) UnlockRepo(ctx context.Context, org string, id int64, repo string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/migrations_source_import.go b/vendor/github.com/google/go-github/github/migrations_source_import.go deleted file mode 100644 index fd45e780065a..000000000000 --- a/vendor/github.com/google/go-github/github/migrations_source_import.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Import represents a repository import request. 
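The organization-migration methods removed above (`StartMigration`, `MigrationStatus`, `MigrationArchiveURL`, `DeleteMigration`, `UnlockRepo`) are normally driven as a start-then-poll loop. A minimal sketch follows, assuming an authenticated client whose `Migrations` field exposes this `MigrationService` (that field name is not shown in this hunk) and hypothetical organization and repository names.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Migrations require an authenticated client; the token is a placeholder.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "hypothetical-token"})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// Start an archive of two repositories belonging to a hypothetical organization.
	opt := &github.MigrationOptions{LockRepositories: false, ExcludeAttachments: true}
	m, _, err := client.Migrations.StartMigration(ctx, "example-org", []string{"repo-a", "repo-b"}, opt)
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the archive leaves the pending/exporting states (simplified: no timeout).
	for m.State != nil && (*m.State == "pending" || *m.State == "exporting") {
		time.Sleep(30 * time.Second)
		if m, _, err = client.Migrations.MigrationStatus(ctx, "example-org", *m.ID); err != nil {
			log.Fatal(err)
		}
	}
	if m.State == nil || *m.State != "exported" {
		log.Fatal("migration did not reach the exported state")
	}

	// Fetch the short-lived archive URL once the export has finished.
	url, err := client.Migrations.MigrationArchiveURL(ctx, "example-org", *m.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive available at:", url)
}
```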
-type Import struct { - // The URL of the originating repository. - VCSURL *string `json:"vcs_url,omitempty"` - // The originating VCS type. Can be one of 'subversion', 'git', - // 'mercurial', or 'tfvc'. Without this parameter, the import job will - // take additional time to detect the VCS type before beginning the - // import. This detection step will be reflected in the response. - VCS *string `json:"vcs,omitempty"` - // VCSUsername and VCSPassword are only used for StartImport calls that - // are importing a password-protected repository. - VCSUsername *string `json:"vcs_username,omitempty"` - VCSPassword *string `json:"vcs_password,omitempty"` - // For a tfvc import, the name of the project that is being imported. - TFVCProject *string `json:"tfvc_project,omitempty"` - - // LFS related fields that may be preset in the Import Progress response - - // Describes whether the import has been opted in or out of using Git - // LFS. The value can be 'opt_in', 'opt_out', or 'undecided' if no - // action has been taken. - UseLFS *string `json:"use_lfs,omitempty"` - // Describes whether files larger than 100MB were found during the - // importing step. - HasLargeFiles *bool `json:"has_large_files,omitempty"` - // The total size in gigabytes of files larger than 100MB found in the - // originating repository. - LargeFilesSize *int `json:"large_files_size,omitempty"` - // The total number of files larger than 100MB found in the originating - // repository. To see a list of these files, call LargeFiles. - LargeFilesCount *int `json:"large_files_count,omitempty"` - - // Identifies the current status of an import. An import that does not - // have errors will progress through these steps: - // - // detecting - the "detection" step of the import is in progress - // because the request did not include a VCS parameter. The - // import is identifying the type of source control present at - // the URL. - // importing - the "raw" step of the import is in progress. This is - // where commit data is fetched from the original repository. - // The import progress response will include CommitCount (the - // total number of raw commits that will be imported) and - // Percent (0 - 100, the current progress through the import). - // mapping - the "rewrite" step of the import is in progress. This - // is where SVN branches are converted to Git branches, and - // where author updates are applied. The import progress - // response does not include progress information. - // pushing - the "push" step of the import is in progress. This is - // where the importer updates the repository on GitHub. The - // import progress response will include PushPercent, which is - // the percent value reported by git push when it is "Writing - // objects". - // complete - the import is complete, and the repository is ready - // on GitHub. - // - // If there are problems, you will see one of these in the status field: - // - // auth_failed - the import requires authentication in order to - // connect to the original repository. Make an UpdateImport - // request, and include VCSUsername and VCSPassword. - // error - the import encountered an error. The import progress - // response will include the FailedStep and an error message. - // Contact GitHub support for more information. - // detection_needs_auth - the importer requires authentication for - // the originating repository to continue detection. Make an - // UpdatImport request, and include VCSUsername and - // VCSPassword. 
- // detection_found_nothing - the importer didn't recognize any - // source control at the URL. - // detection_found_multiple - the importer found several projects - // or repositories at the provided URL. When this is the case, - // the Import Progress response will also include a - // ProjectChoices field with the possible project choices as - // values. Make an UpdateImport request, and include VCS and - // (if applicable) TFVCProject. - Status *string `json:"status,omitempty"` - CommitCount *int `json:"commit_count,omitempty"` - StatusText *string `json:"status_text,omitempty"` - AuthorsCount *int `json:"authors_count,omitempty"` - Percent *int `json:"percent,omitempty"` - PushPercent *int `json:"push_percent,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - AuthorsURL *string `json:"authors_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - Message *string `json:"message,omitempty"` - FailedStep *string `json:"failed_step,omitempty"` - - // Human readable display name, provided when the Import appears as - // part of ProjectChoices. - HumanName *string `json:"human_name,omitempty"` - - // When the importer finds several projects or repositories at the - // provided URLs, this will identify the available choices. Call - // UpdateImport with the selected Import value. - ProjectChoices []Import `json:"project_choices,omitempty"` -} - -func (i Import) String() string { - return Stringify(i) -} - -// SourceImportAuthor identifies an author imported from a source repository. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors -type SourceImportAuthor struct { - ID *int64 `json:"id,omitempty"` - RemoteID *string `json:"remote_id,omitempty"` - RemoteName *string `json:"remote_name,omitempty"` - Email *string `json:"email,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - ImportURL *string `json:"import_url,omitempty"` -} - -func (a SourceImportAuthor) String() string { - return Stringify(a) -} - -// LargeFile identifies a file larger than 100MB found during a repository import. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files -type LargeFile struct { - RefName *string `json:"ref_name,omitempty"` - Path *string `json:"path,omitempty"` - OID *string `json:"oid,omitempty"` - Size *int `json:"size,omitempty"` -} - -func (f LargeFile) String() string { - return Stringify(f) -} - -// StartImport initiates a repository import. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#start-an-import -func (s *MigrationService) StartImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import", owner, repo) - req, err := s.client.NewRequest("PUT", u, in) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - out := new(Import) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// ImportProgress queries for the status and progress of an ongoing repository import. 
-// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-import-progress -func (s *MigrationService) ImportProgress(ctx context.Context, owner, repo string) (*Import, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - out := new(Import) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// UpdateImport initiates a repository import. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#update-existing-import -func (s *MigrationService) UpdateImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import", owner, repo) - req, err := s.client.NewRequest("PATCH", u, in) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - out := new(Import) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// CommitAuthors gets the authors mapped from the original repository. -// -// Each type of source control system represents authors in a different way. -// For example, a Git commit author has a display name and an email address, -// but a Subversion commit author just has a username. The GitHub Importer will -// make the author information valid, but the author might not be correct. For -// example, it will change the bare Subversion username "hubot" into something -// like "hubot ". -// -// This method and MapCommitAuthor allow you to provide correct Git author -// information. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors -func (s *MigrationService) CommitAuthors(ctx context.Context, owner, repo string) ([]*SourceImportAuthor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - var authors []*SourceImportAuthor - resp, err := s.client.Do(ctx, req, &authors) - if err != nil { - return nil, resp, err - } - - return authors, resp, nil -} - -// MapCommitAuthor updates an author's identity for the import. Your -// application can continue updating authors any time before you push new -// commits to the repository. 
-// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#map-a-commit-author -func (s *MigrationService) MapCommitAuthor(ctx context.Context, owner, repo string, id int64, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, author) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - out := new(SourceImportAuthor) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// SetLFSPreference sets whether imported repositories should use Git LFS for -// files larger than 100MB. Only the UseLFS field on the provided Import is -// used. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#set-git-lfs-preference -func (s *MigrationService) SetLFSPreference(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo) - req, err := s.client.NewRequest("PATCH", u, in) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - out := new(Import) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// LargeFiles lists files larger than 100MB found during the import. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files -func (s *MigrationService) LargeFiles(ctx context.Context, owner, repo string) ([]*LargeFile, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - var files []*LargeFile - resp, err := s.client.Do(ctx, req, &files) - if err != nil { - return nil, resp, err - } - - return files, resp, nil -} - -// CancelImport stops an import for a repository. -// -// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#cancel-an-import -func (s *MigrationService) CancelImport(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/import", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeImportPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/migrations_user.go b/vendor/github.com/google/go-github/github/migrations_user.go deleted file mode 100644 index d45555f216b7..000000000000 --- a/vendor/github.com/google/go-github/github/migrations_user.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" - "net/http" -) - -// UserMigration represents a GitHub migration (archival). 
-type UserMigration struct { - ID *int64 `json:"id,omitempty"` - GUID *string `json:"guid,omitempty"` - // State is the current state of a migration. - // Possible values are: - // "pending" which means the migration hasn't started yet, - // "exporting" which means the migration is in progress, - // "exported" which means the migration finished successfully, or - // "failed" which means the migration failed. - State *string `json:"state,omitempty"` - // LockRepositories indicates whether repositories are locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` - URL *string `json:"url,omitempty"` - CreatedAt *string `json:"created_at,omitempty"` - UpdatedAt *string `json:"updated_at,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -func (m UserMigration) String() string { - return Stringify(m) -} - -// UserMigrationOptions specifies the optional parameters to Migration methods. -type UserMigrationOptions struct { - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories bool - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments bool -} - -// startUserMigration represents the body of a StartMigration request. -type startUserMigration struct { - // Repositories is a slice of repository names to migrate. - Repositories []string `json:"repositories,omitempty"` - - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` -} - -// StartUserMigration starts the generation of a migration archive. -// repos is a slice of repository names to migrate. -// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#start-a-user-migration -func (s *MigrationService) StartUserMigration(ctx context.Context, repos []string, opt *UserMigrationOptions) (*UserMigration, *Response, error) { - u := "user/migrations" - - body := &startUserMigration{Repositories: repos} - if opt != nil { - body.LockRepositories = Bool(opt.LockRepositories) - body.ExcludeAttachments = Bool(opt.ExcludeAttachments) - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListUserMigrations lists the most recent migrations. 
-// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#get-a-list-of-user-migrations -func (s *MigrationService) ListUserMigrations(ctx context.Context) ([]*UserMigration, *Response, error) { - u := "user/migrations" - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - var m []*UserMigration - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UserMigrationStatus gets the status of a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#get-the-status-of-a-user-migration -func (s *MigrationService) UserMigrationStatus(ctx context.Context, id int64) (*UserMigration, *Response, error) { - u := fmt.Sprintf("user/migrations/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UserMigrationArchiveURL gets the URL for a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#download-a-user-migration-archive -func (s *MigrationService) UserMigrationArchiveURL(ctx context.Context, id int64) (string, error) { - url := fmt.Sprintf("user/migrations/%v/archive", id) - - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return "", err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - - var loc string - originalRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return http.ErrUseLastResponse - } - defer func() { - s.client.client.CheckRedirect = originalRedirect - }() - resp, err := s.client.Do(ctx, req, m) - if err == nil { - return "", errors.New("expected redirect, none provided") - } - loc = resp.Header.Get("Location") - return loc, nil -} - -// DeleteUserMigration will delete a previous migration archive. -// id is the migration ID. -// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#delete-a-user-migration-archive -func (s *MigrationService) DeleteUserMigration(ctx context.Context, id int64) (*Response, error) { - url := fmt.Sprintf("user/migrations/%v/archive", id) - - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnlockUserRepo will unlock a repo that was locked for migration. -// id is migration ID. -// You should unlock each migrated repository and delete them when the migration -// is complete and you no longer need the source data. 
-// -// GitHub API docs: https://developer.github.com/v3/migrations/users/#unlock-a-user-repository -func (s *MigrationService) UnlockUserRepo(ctx context.Context, id int64, repo string) (*Response, error) { - url := fmt.Sprintf("user/migrations/%v/repos/%v/lock", id, repo) - - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/misc.go b/vendor/github.com/google/go-github/github/misc.go deleted file mode 100644 index e9b0ea22a62d..000000000000 --- a/vendor/github.com/google/go-github/github/misc.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" - "net/url" -) - -// MarkdownOptions specifies optional parameters to the Markdown method. -type MarkdownOptions struct { - // Mode identifies the rendering mode. Possible values are: - // markdown - render a document as plain Markdown, just like - // README files are rendered. - // - // gfm - to render a document as user-content, e.g. like user - // comments or issues are rendered. In GFM mode, hard line breaks are - // always taken into account, and issue and user mentions are linked - // accordingly. - // - // Default is "markdown". - Mode string - - // Context identifies the repository context. Only taken into account - // when rendering as "gfm". - Context string -} - -type markdownRequest struct { - Text *string `json:"text,omitempty"` - Mode *string `json:"mode,omitempty"` - Context *string `json:"context,omitempty"` -} - -// Markdown renders an arbitrary Markdown document. -// -// GitHub API docs: https://developer.github.com/v3/markdown/ -func (c *Client) Markdown(ctx context.Context, text string, opt *MarkdownOptions) (string, *Response, error) { - request := &markdownRequest{Text: String(text)} - if opt != nil { - if opt.Mode != "" { - request.Mode = String(opt.Mode) - } - if opt.Context != "" { - request.Context = String(opt.Context) - } - } - - req, err := c.NewRequest("POST", "markdown", request) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ListEmojis returns the emojis available to use on GitHub. -// -// GitHub API docs: https://developer.github.com/v3/emojis/ -func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response, error) { - req, err := c.NewRequest("GET", "emojis", nil) - if err != nil { - return nil, nil, err - } - - var emoji map[string]string - resp, err := c.Do(ctx, req, &emoji) - if err != nil { - return nil, resp, err - } - - return emoji, resp, nil -} - -// CodeOfConduct represents a code of conduct. -type CodeOfConduct struct { - Name *string `json:"name,omitempty"` - Key *string `json:"key,omitempty"` - URL *string `json:"url,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (c *CodeOfConduct) String() string { - return Stringify(c) -} - -// ListCodesOfConduct returns all codes of conduct. 
-// -// GitHub API docs: https://developer.github.com/v3/codes_of_conduct/#list-all-codes-of-conduct -func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Response, error) { - req, err := c.NewRequest("GET", "codes_of_conduct", nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - - var cs []*CodeOfConduct - resp, err := c.Do(ctx, req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil -} - -// GetCodeOfConduct returns an individual code of conduct. -// -// https://developer.github.com/v3/codes_of_conduct/#get-an-individual-code-of-conduct -func (c *Client) GetCodeOfConduct(ctx context.Context, key string) (*CodeOfConduct, *Response, error) { - u := fmt.Sprintf("codes_of_conduct/%s", key) - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - - coc := new(CodeOfConduct) - resp, err := c.Do(ctx, req, coc) - if err != nil { - return nil, resp, err - } - - return coc, resp, nil -} - -// APIMeta represents metadata about the GitHub API. -type APIMeta struct { - // An Array of IP addresses in CIDR format specifying the addresses - // that incoming service hooks will originate from on GitHub.com. - Hooks []string `json:"hooks,omitempty"` - - // An Array of IP addresses in CIDR format specifying the Git servers - // for GitHub.com. - Git []string `json:"git,omitempty"` - - // Whether authentication with username and password is supported. - // (GitHub Enterprise instances using CAS or OAuth for authentication - // will return false. Features like Basic Authentication with a - // username and password, sudo mode, and two-factor authentication are - // not supported on these servers.) - VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"` - - // An array of IP addresses in CIDR format specifying the addresses - // which serve GitHub Pages websites. - Pages []string `json:"pages,omitempty"` - - // An Array of IP addresses specifying the addresses that source imports - // will originate from on GitHub.com. - Importer []string `json:"importer,omitempty"` -} - -// APIMeta returns information about GitHub.com, the service. Or, if you access -// this endpoint on your organization’s GitHub Enterprise installation, this -// endpoint provides information about that installation. -// -// GitHub API docs: https://developer.github.com/v3/meta/ -func (c *Client) APIMeta(ctx context.Context) (*APIMeta, *Response, error) { - req, err := c.NewRequest("GET", "meta", nil) - if err != nil { - return nil, nil, err - } - - meta := new(APIMeta) - resp, err := c.Do(ctx, req, meta) - if err != nil { - return nil, resp, err - } - - return meta, resp, nil -} - -// Octocat returns an ASCII art octocat with the specified message in a speech -// bubble. If message is empty, a random zen phrase is used. 
-func (c *Client) Octocat(ctx context.Context, message string) (string, *Response, error) { - u := "octocat" - if message != "" { - u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message)) - } - - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// Zen returns a random line from The Zen of GitHub. -// -// see also: http://warpspire.com/posts/taste/ -func (c *Client) Zen(ctx context.Context) (string, *Response, error) { - req, err := c.NewRequest("GET", "zen", nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ServiceHook represents a hook that has configuration settings, a list of -// available events, and default events. -type ServiceHook struct { - Name *string `json:"name,omitempty"` - Events []string `json:"events,omitempty"` - SupportedEvents []string `json:"supported_events,omitempty"` - Schema [][]string `json:"schema,omitempty"` -} - -func (s *ServiceHook) String() string { - return Stringify(s) -} - -// ListServiceHooks lists all of the available service hooks. -// -// GitHub API docs: https://developer.github.com/webhooks/#services -func (c *Client) ListServiceHooks(ctx context.Context) ([]*ServiceHook, *Response, error) { - u := "hooks" - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*ServiceHook - resp, err := c.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go deleted file mode 100644 index c70039ba080f..000000000000 --- a/vendor/github.com/google/go-github/github/orgs.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// OrganizationsService provides access to the organization related functions -// in the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/orgs/ -type OrganizationsService service - -// Organization represents a GitHub organization account. 
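The misc.go file removed above exposes small utility endpoints directly on `*Client` (`Markdown`, `ListEmojis`, `APIMeta`, `Octocat`, `Zen`). A short sketch of the Markdown and Zen calls; the repository context string is an illustrative value.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated access is enough for these endpoints

	// Render an issue reference in GFM mode against a hypothetical repository context.
	opt := &github.MarkdownOptions{Mode: "gfm", Context: "example-owner/example-repo"}
	html, _, err := client.Markdown(ctx, "Fixes #123 :tada:", opt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(html)

	// The same client exposes the other misc endpoints removed here, e.g. Zen.
	zen, _, err := client.Zen(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(zen)
}
```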
-type Organization struct { - Login *string `json:"login,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Name *string `json:"name,omitempty"` - Company *string `json:"company,omitempty"` - Blog *string `json:"blog,omitempty"` - Location *string `json:"location,omitempty"` - Email *string `json:"email,omitempty"` - Description *string `json:"description,omitempty"` - PublicRepos *int `json:"public_repos,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` - Followers *int `json:"followers,omitempty"` - Following *int `json:"following,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - DiskUsage *int `json:"disk_usage,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - BillingEmail *string `json:"billing_email,omitempty"` - Type *string `json:"type,omitempty"` - Plan *Plan `json:"plan,omitempty"` - TwoFactorRequirementEnabled *bool `json:"two_factor_requirement_enabled,omitempty"` - - // DefaultRepoPermission can be one of: "read", "write", "admin", or "none". (Default: "read"). - // It is only used in OrganizationsService.Edit. - DefaultRepoPermission *string `json:"default_repository_permission,omitempty"` - // DefaultRepoSettings can be one of: "read", "write", "admin", or "none". (Default: "read"). - // It is only used in OrganizationsService.Get. - DefaultRepoSettings *string `json:"default_repository_settings,omitempty"` - - // MembersCanCreateRepos default value is true and is only used in Organizations.Edit. - MembersCanCreateRepos *bool `json:"members_can_create_repositories,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - HooksURL *string `json:"hooks_url,omitempty"` - IssuesURL *string `json:"issues_url,omitempty"` - MembersURL *string `json:"members_url,omitempty"` - PublicMembersURL *string `json:"public_members_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` -} - -func (o Organization) String() string { - return Stringify(o) -} - -// Plan represents the payment plan for an account. See plans at https://github.com/plans. -type Plan struct { - Name *string `json:"name,omitempty"` - Space *int `json:"space,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - PrivateRepos *int `json:"private_repos,omitempty"` -} - -func (p Plan) String() string { - return Stringify(p) -} - -// OrganizationsListOptions specifies the optional parameters to the -// OrganizationsService.ListAll method. -type OrganizationsListOptions struct { - // Since filters Organizations by ID. - Since int64 `url:"since,omitempty"` - - // Note: Pagination is powered exclusively by the Since parameter, - // ListOptions.Page has no effect. - // ListOptions.PerPage controls an undocumented GitHub API parameter. - ListOptions -} - -// ListAll lists all organizations, in the order that they were created on GitHub. -// -// Note: Pagination is powered exclusively by the since parameter. To continue -// listing the next set of organizations, use the ID of the last-returned organization -// as the opts.Since parameter for the next call. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/#list-all-organizations -func (s *OrganizationsService) ListAll(ctx context.Context, opt *OrganizationsListOptions) ([]*Organization, *Response, error) { - u, err := addOptions("organizations", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - orgs := []*Organization{} - resp, err := s.client.Do(ctx, req, &orgs) - if err != nil { - return nil, resp, err - } - return orgs, resp, nil -} - -// List the organizations for a user. Passing the empty string will list -// organizations for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/orgs/#list-user-organizations -func (s *OrganizationsService) List(ctx context.Context, user string, opt *ListOptions) ([]*Organization, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/orgs", user) - } else { - u = "user/orgs" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var orgs []*Organization - resp, err := s.client.Do(ctx, req, &orgs) - if err != nil { - return nil, resp, err - } - - return orgs, resp, nil -} - -// Get fetches an organization by name. -// -// GitHub API docs: https://developer.github.com/v3/orgs/#get-an-organization -func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - organization := new(Organization) - resp, err := s.client.Do(ctx, req, organization) - if err != nil { - return nil, resp, err - } - - return organization, resp, nil -} - -// GetByID fetches an organization. -// -// Note: GetByID uses the undocumented GitHub API endpoint /organizations/:id. -func (s *OrganizationsService) GetByID(ctx context.Context, id int64) (*Organization, *Response, error) { - u := fmt.Sprintf("organizations/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - organization := new(Organization) - resp, err := s.client.Do(ctx, req, organization) - if err != nil { - return nil, resp, err - } - - return organization, resp, nil -} - -// Edit an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/#edit-an-organization -func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organization) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", name) - req, err := s.client.NewRequest("PATCH", u, org) - if err != nil { - return nil, nil, err - } - - o := new(Organization) - resp, err := s.client.Do(ctx, req, o) - if err != nil { - return nil, resp, err - } - - return o, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/orgs_hooks.go b/vendor/github.com/google/go-github/github/orgs_hooks.go deleted file mode 100644 index b710ea402396..000000000000 --- a/vendor/github.com/google/go-github/github/orgs_hooks.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListHooks lists all Hooks for the specified organization. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#list-hooks -func (s *OrganizationsService) ListHooks(ctx context.Context, org string, opt *ListOptions) ([]*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*Hook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#get-single-hook -func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - hook := new(Hook) - resp, err := s.client.Do(ctx, req, hook) - return hook, resp, err -} - -// CreateHook creates a Hook for the specified org. -// Config is a required field. -// -// Note that only a subset of the hook fields are used and hook must -// not be nil. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#create-a-hook -func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - - hookReq := &createHookRequest{ - Events: hook.Events, - Active: hook.Active, - Config: hook.Config, - } - - req, err := s.client.NewRequest("POST", u, hookReq) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// EditHook updates a specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#edit-a-hook -func (s *OrganizationsService) EditHook(ctx context.Context, org string, id int64, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - return h, resp, err -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#ping-a-hook -func (s *OrganizationsService) PingHook(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// DeleteHook deletes a specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#delete-a-hook -func (s *OrganizationsService) DeleteHook(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go deleted file mode 100644 index d18435999c96..000000000000 --- a/vendor/github.com/google/go-github/github/orgs_members.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Membership represents the status of a user's membership in an organization or team. -type Membership struct { - URL *string `json:"url,omitempty"` - - // State is the user's status within the organization or team. - // Possible values are: "active", "pending" - State *string `json:"state,omitempty"` - - // Role identifies the user's role within the organization or team. - // Possible values for organization membership: - // member - non-owner organization member - // admin - organization owner - // - // Possible values for team membership are: - // member - a normal member of the team - // maintainer - a team maintainer. Able to add/remove other team - // members, promote other team members to team - // maintainer, and edit the team’s name and description - Role *string `json:"role,omitempty"` - - // For organization membership, the API URL of the organization. - OrganizationURL *string `json:"organization_url,omitempty"` - - // For organization membership, the organization the membership is for. - Organization *Organization `json:"organization,omitempty"` - - // For organization membership, the user the membership is for. - User *User `json:"user,omitempty"` -} - -func (m Membership) String() string { - return Stringify(m) -} - -// ListMembersOptions specifies optional parameters to the -// OrganizationsService.ListMembers method. -type ListMembersOptions struct { - // If true (or if the authenticated user is not an owner of the - // organization), list only publicly visible members. - PublicOnly bool `url:"-"` - - // Filter members returned in the list. Possible values are: - // 2fa_disabled, all. Default is "all". - Filter string `url:"filter,omitempty"` - - // Role filters members returned by their role in the organization. - // Possible values are: - // all - all members of the organization, regardless of role - // admin - organization owners - // member - non-owner organization members - // - // Default is "all". - Role string `url:"role,omitempty"` - - ListOptions -} - -// ListMembers lists the members for an organization. If the authenticated -// user is an owner of the organization, this will return both concealed and -// public members, otherwise it will only return public members. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#members-list -func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opt *ListMembersOptions) ([]*User, *Response, error) { - var u string - if opt != nil && opt.PublicOnly { - u = fmt.Sprintf("orgs/%v/public_members", org) - } else { - u = fmt.Sprintf("orgs/%v/members", org) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var members []*User - resp, err := s.client.Do(ctx, req, &members) - if err != nil { - return nil, resp, err - } - - return members, resp, nil -} - -// IsMember checks if a user is a member of an organization. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#check-membership -func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// IsPublicMember checks if a user is a public member of an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#check-public-membership -func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// RemoveMember removes a user from all teams of an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-a-member -func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// PublicizeMembership publicizes a user's membership in an organization. (A -// user cannot publicize the membership for another user.) -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#publicize-a-users-membership -func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ConcealMembership conceals a user's membership in an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#conceal-a-users-membership -func (s *OrganizationsService) ConcealMembership(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListOrgMembershipsOptions specifies optional parameters to the -// OrganizationsService.ListOrgMemberships method. -type ListOrgMembershipsOptions struct { - // Filter memberships to include only those with the specified state. - // Possible values are: "active", "pending". - State string `url:"state,omitempty"` - - ListOptions -} - -// ListOrgMemberships lists the organization memberships for the authenticated user. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-your-organization-memberships -func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opt *ListOrgMembershipsOptions) ([]*Membership, *Response, error) { - u := "user/memberships/orgs" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var memberships []*Membership - resp, err := s.client.Do(ctx, req, &memberships) - if err != nil { - return nil, resp, err - } - - return memberships, resp, nil -} - -// GetOrgMembership gets the membership for a user in a specified organization. -// Passing an empty string for user will get the membership for the -// authenticated user. -// -// GitHub API docs: -// https://developer.github.com/v3/orgs/members/#get-organization-membership -// https://developer.github.com/v3/orgs/members/#get-your-organization-membership -func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org string) (*Membership, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("orgs/%v/memberships/%v", org, user) - } else { - u = fmt.Sprintf("user/memberships/orgs/%v", org) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - membership := new(Membership) - resp, err := s.client.Do(ctx, req, membership) - if err != nil { - return nil, resp, err - } - - return membership, resp, nil -} - -// EditOrgMembership edits the membership for user in specified organization. -// Passing an empty string for user will edit the membership for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership -// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership -func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org string, membership *Membership) (*Membership, *Response, error) { - var u, method string - if user != "" { - u = fmt.Sprintf("orgs/%v/memberships/%v", org, user) - method = "PUT" - } else { - u = fmt.Sprintf("user/memberships/orgs/%v", org) - method = "PATCH" - } - - req, err := s.client.NewRequest(method, u, membership) - if err != nil { - return nil, nil, err - } - - m := new(Membership) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// RemoveOrgMembership removes user from the specified organization. If the -// user has been invited to the organization, this will cancel their invitation. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-organization-membership -func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, org string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/memberships/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListPendingOrgInvitations returns a list of pending invitations. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-pending-organization-invitations -func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opt *ListOptions) ([]*Invitation, *Response, error) { - u := fmt.Sprintf("orgs/%v/invitations", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pendingInvitations []*Invitation - resp, err := s.client.Do(ctx, req, &pendingInvitations) - if err != nil { - return nil, resp, err - } - return pendingInvitations, resp, nil -} - -// CreateOrgInvitationOptions specifies the parameters to the OrganizationService.Invite -// method. -type CreateOrgInvitationOptions struct { - // GitHub user ID for the person you are inviting. Not required if you provide Email. - InviteeID *int64 `json:"invitee_id,omitempty"` - // Email address of the person you are inviting, which can be an existing GitHub user. - // Not required if you provide InviteeID - Email *string `json:"email,omitempty"` - // Specify role for new member. Can be one of: - // * admin - Organization owners with full administrative rights to the - // organization and complete access to all repositories and teams. - // * direct_member - Non-owner organization members with ability to see - // other members and join teams by invitation. - // * billing_manager - Non-owner organization members with ability to - // manage the billing settings of your organization. - // Default is "direct_member". - Role *string `json:"role"` - TeamID []int64 `json:"team_ids"` -} - -// CreateOrgInvitation invites people to an organization by using their GitHub user ID or their email address. -// In order to create invitations in an organization, -// the authenticated user must be an organization owner. -// -// https://developer.github.com/v3/orgs/members/#create-organization-invitation -func (s *OrganizationsService) CreateOrgInvitation(ctx context.Context, org string, opt *CreateOrgInvitationOptions) (*Invitation, *Response, error) { - u := fmt.Sprintf("orgs/%v/invitations", org) - - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeOrganizationInvitationPreview) - - var invitation *Invitation - resp, err := s.client.Do(ctx, req, &invitation) - if err != nil { - return nil, resp, err - } - return invitation, resp, nil -} - -// ListOrgInvitationTeams lists all teams associated with an invitation. In order to see invitations in an organization, -// the authenticated user must be an organization owner. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-organization-invitation-teams -func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, invitationID string, opt *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/invitations/%v/teams", org, invitationID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeOrganizationInvitationPreview) - - var orgInvitationTeams []*Team - resp, err := s.client.Do(ctx, req, &orgInvitationTeams) - if err != nil { - return nil, resp, err - } - return orgInvitationTeams, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go deleted file mode 100644 index 85ffd05f61aa..000000000000 --- a/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListOutsideCollaboratorsOptions specifies optional parameters to the -// OrganizationsService.ListOutsideCollaborators method. -type ListOutsideCollaboratorsOptions struct { - // Filter outside collaborators returned in the list. Possible values are: - // 2fa_disabled, all. Default is "all". - Filter string `url:"filter,omitempty"` - - ListOptions -} - -// ListOutsideCollaborators lists outside collaborators of organization's repositories. -// This will only work if the authenticated -// user is an owner of the organization. -// -// Warning: The API may change without advance notice during the preview period. -// Preview features are not supported for production use. -// -// GitHub API docs: https://developer.github.com/v3/orgs/outside_collaborators/#list-outside-collaborators -func (s *OrganizationsService) ListOutsideCollaborators(ctx context.Context, org string, opt *ListOutsideCollaboratorsOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var members []*User - resp, err := s.client.Do(ctx, req, &members) - if err != nil { - return nil, resp, err - } - - return members, resp, nil -} - -// RemoveOutsideCollaborator removes a user from the list of outside collaborators; -// consequently, removing them from all the organization's repositories. -// -// GitHub API docs: https://developer.github.com/v3/orgs/outside_collaborators/#remove-outside-collaborator -func (s *OrganizationsService) RemoveOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ConvertMemberToOutsideCollaborator reduces the permission level of a member of the -// organization to that of an outside collaborator. Therefore, they will only -// have access to the repositories that their current team membership allows. -// Responses for converting a non-member or the last owner to an outside collaborator -// are listed in GitHub API docs. 
-// -// GitHub API docs: https://developer.github.com/v3/orgs/outside_collaborators/#convert-member-to-outside-collaborator -func (s *OrganizationsService) ConvertMemberToOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/orgs_projects.go b/vendor/github.com/google/go-github/github/orgs_projects.go deleted file mode 100644 index e57cba97829e..000000000000 --- a/vendor/github.com/google/go-github/github/orgs_projects.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListProjects lists the projects for an organization. -// -// GitHub API docs: https://developer.github.com/v3/projects/#list-organization-projects -func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opt *ProjectListOptions) ([]*Project, *Response, error) { - u := fmt.Sprintf("orgs/%v/projects", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var projects []*Project - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// CreateProject creates a GitHub Project for the specified organization. -// -// GitHub API docs: https://developer.github.com/v3/projects/#create-an-organization-project -func (s *OrganizationsService) CreateProject(ctx context.Context, org string, opt *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("orgs/%v/projects", org) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/github/orgs_users_blocking.go deleted file mode 100644 index b1aecf44532c..000000000000 --- a/vendor/github.com/google/go-github/github/orgs_users_blocking.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListBlockedUsers lists all the users blocked by an organization. 
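A similar hedged sketch for the outside-collaborator endpoints above; the organization and user names are placeholders, and an authenticated client with owner permissions is assumed.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// demoteMember lists outside collaborators without 2FA and converts one org member.
func demoteMember(ctx context.Context, client *github.Client, org, user string) error {
	collaborators, _, err := client.Organizations.ListOutsideCollaborators(ctx, org, &github.ListOutsideCollaboratorsOptions{
		Filter: "2fa_disabled",
	})
	if err != nil {
		return err
	}
	for _, c := range collaborators {
		fmt.Println("outside collaborator without 2FA:", c.GetLogin())
	}

	// Reduce an existing member to an outside collaborator.
	_, err = client.Organizations.ConvertMemberToOutsideCollaborator(ctx, org, user)
	return err
}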
-// -// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#list-blocked-users -func (s *OrganizationsService) ListBlockedUsers(ctx context.Context, org string, opt *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("orgs/%v/blocks", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - var blockedUsers []*User - resp, err := s.client.Do(ctx, req, &blockedUsers) - if err != nil { - return nil, resp, err - } - - return blockedUsers, resp, nil -} - -// IsBlocked reports whether specified user is blocked from an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#check-whether-a-user-is-blocked-from-an-organization -func (s *OrganizationsService) IsBlocked(ctx context.Context, org string, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - resp, err := s.client.Do(ctx, req, nil) - isBlocked, err := parseBoolResponse(err) - return isBlocked, resp, err -} - -// BlockUser blocks specified user from an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#block-a-user -func (s *OrganizationsService) BlockUser(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnblockUser unblocks specified user from an organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#unblock-a-user -func (s *OrganizationsService) UnblockUser(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go deleted file mode 100644 index 423968ec846c..000000000000 --- a/vendor/github.com/google/go-github/github/projects.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ProjectsService provides access to the projects functions in the -// GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/projects/ -type ProjectsService service - -// Project represents a GitHub Project. 
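The blocking endpoints above compose in the obvious way; a small sketch with placeholder names, assuming the client is authenticated as an organization owner.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// ensureBlocked blocks user in org unless they are already blocked.
func ensureBlocked(ctx context.Context, client *github.Client, org, user string) error {
	blocked, _, err := client.Organizations.IsBlocked(ctx, org, user)
	if err != nil {
		return err
	}
	if blocked {
		return nil
	}
	_, err = client.Organizations.BlockUser(ctx, org, user)
	return err
}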
-type Project struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - ColumnsURL *string `json:"columns_url,omitempty"` - OwnerURL *string `json:"owner_url,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` - - // The User object that generated the project. - Creator *User `json:"creator,omitempty"` -} - -func (p Project) String() string { - return Stringify(p) -} - -// GetProject gets a GitHub Project for a repo. -// -// GitHub API docs: https://developer.github.com/v3/projects/#get-a-project -func (s *ProjectsService) GetProject(ctx context.Context, id int64) (*Project, *Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} - -// ProjectOptions specifies the parameters to the -// RepositoriesService.CreateProject and -// ProjectsService.UpdateProject methods. -type ProjectOptions struct { - // The name of the project. (Required for creation; optional for update.) - Name *string `json:"name,omitempty"` - // The body of the project. (Optional.) - Body *string `json:"body,omitempty"` - - // The following field(s) are only applicable for update. - // They should be left with zero values for creation. - - // State of the project. Either "open" or "closed". (Optional.) - State *string `json:"state,omitempty"` - // The permission level that all members of the project's organization - // will have on this project. - // Setting the organization permission is only available - // for organization projects. (Optional.) - OrganizationPermission *string `json:"organization_permission,omitempty"` - // Sets visibility of the project within the organization. - // Setting visibility is only available - // for organization projects.(Optional.) - Public *bool `json:"public,omitempty"` -} - -// UpdateProject updates a repository project. -// -// GitHub API docs: https://developer.github.com/v3/projects/#update-a-project -func (s *ProjectsService) UpdateProject(ctx context.Context, id int64, opt *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("PATCH", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} - -// DeleteProject deletes a GitHub Project from a repository. -// -// GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project -func (s *ProjectsService) DeleteProject(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ProjectColumn represents a column of a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/repos/projects/ -type ProjectColumn struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - ProjectURL *string `json:"project_url,omitempty"` - CardsURL *string `json:"cards_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// ListProjectColumns lists the columns of a GitHub Project for a repo. -// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns -func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int64, opt *ListOptions) ([]*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/%v/columns", projectID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - columns := []*ProjectColumn{} - resp, err := s.client.Do(ctx, req, &columns) - if err != nil { - return nil, resp, err - } - - return columns, resp, nil -} - -// GetProjectColumn gets a column of a GitHub Project for a repo. -// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column -func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int64) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/columns/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// ProjectColumnOptions specifies the parameters to the -// ProjectsService.CreateProjectColumn and -// ProjectsService.UpdateProjectColumn methods. -type ProjectColumnOptions struct { - // The name of the project column. (Required for creation and update.) - Name string `json:"name"` -} - -// CreateProjectColumn creates a column for the specified (by number) project. -// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column -func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int64, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/%v/columns", projectID) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// UpdateProjectColumn updates a column of a GitHub Project. 
-// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column -func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int64, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/columns/%v", columnID) - req, err := s.client.NewRequest("PATCH", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// DeleteProjectColumn deletes a column from a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column -func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int64) (*Response, error) { - u := fmt.Sprintf("projects/columns/%v", columnID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ProjectColumnMoveOptions specifies the parameters to the -// ProjectsService.MoveProjectColumn method. -type ProjectColumnMoveOptions struct { - // Position can be one of "first", "last", or "after:", where - // is the ID of a column in the same project. (Required.) - Position string `json:"position"` -} - -// MoveProjectColumn moves a column within a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column -func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int64, opt *ProjectColumnMoveOptions) (*Response, error) { - u := fmt.Sprintf("projects/columns/%v/moves", columnID) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ProjectCard represents a card in a column of a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card -type ProjectCard struct { - URL *string `json:"url,omitempty"` - ColumnURL *string `json:"column_url,omitempty"` - ContentURL *string `json:"content_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Note *string `json:"note,omitempty"` - Creator *User `json:"creator,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Archived *bool `json:"archived,omitempty"` - - // The following fields are only populated by Webhook events. - ColumnID *int64 `json:"column_id,omitempty"` - - // The following fields are only populated by Events API. - ProjectID *int64 `json:"project_id,omitempty"` - ProjectURL *string `json:"project_url,omitempty"` - ColumnName *string `json:"column_name,omitempty"` - PreviousColumnName *string `json:"previous_column_name,omitempty"` // Populated in "moved_columns_in_project" event deliveries. -} - -// ProjectCardListOptions specifies the optional parameters to the -// ProjectsService.ListProjectCards method. -type ProjectCardListOptions struct { - // ArchivedState is used to list all, archived, or not_archived project cards. 
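A rough sketch combining the organization-project and project-column calls above; the project and column names are placeholders, and GetID is the library's generated accessor.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// newBoard creates an organization project with a single "To do" column.
func newBoard(ctx context.Context, client *github.Client, org string) (*github.ProjectColumn, error) {
	project, _, err := client.Organizations.CreateProject(ctx, org, &github.ProjectOptions{
		Name: github.String("Release tracking"),
		Body: github.String("Work planned for the next release"),
	})
	if err != nil {
		return nil, err
	}

	column, _, err := client.Projects.CreateProjectColumn(ctx, project.GetID(), &github.ProjectColumnOptions{
		Name: "To do",
	})
	return column, err
}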
- // Defaults to not_archived when you omit this parameter. - ArchivedState *string `url:"archived_state,omitempty"` - - ListOptions -} - -// ListProjectCards lists the cards in a column of a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards -func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int64, opt *ProjectCardListOptions) ([]*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/%v/cards", columnID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - cards := []*ProjectCard{} - resp, err := s.client.Do(ctx, req, &cards) - if err != nil { - return nil, resp, err - } - - return cards, resp, nil -} - -// GetProjectCard gets a card in a column of a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card -func (s *ProjectsService) GetProjectCard(ctx context.Context, columnID int64) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v", columnID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// ProjectCardOptions specifies the parameters to the -// ProjectsService.CreateProjectCard and -// ProjectsService.UpdateProjectCard methods. -type ProjectCardOptions struct { - // The note of the card. Note and ContentID are mutually exclusive. - Note string `json:"note,omitempty"` - // The ID (not Number) of the Issue to associate with this card. - // Note and ContentID are mutually exclusive. - ContentID int64 `json:"content_id,omitempty"` - // The type of content to associate with this card. Possible values are: "Issue" and "PullRequest". - ContentType string `json:"content_type,omitempty"` - // Use true to archive a project card. - // Specify false if you need to restore a previously archived project card. - Archived *bool `json:"archived,omitempty"` -} - -// CreateProjectCard creates a card in the specified column of a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card -func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int64, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/%v/cards", columnID) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// UpdateProjectCard updates a card of a GitHub Project. 
-// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card -func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int64, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v", cardID) - req, err := s.client.NewRequest("PATCH", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// DeleteProjectCard deletes a card from a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card -func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int64) (*Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v", cardID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ProjectCardMoveOptions specifies the parameters to the -// ProjectsService.MoveProjectCard method. -type ProjectCardMoveOptions struct { - // Position can be one of "top", "bottom", or "after:", where - // is the ID of a card in the same project. - Position string `json:"position"` - // ColumnID is the ID of a column in the same project. Note that ColumnID - // is required when using Position "after:" when that card is in - // another column; otherwise it is optional. - ColumnID int64 `json:"column_id,omitempty"` -} - -// MoveProjectCard moves a card within a GitHub Project. -// -// GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card -func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int64, opt *ProjectCardMoveOptions) (*Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go deleted file mode 100644 index aac34792bd0f..000000000000 --- a/vendor/github.com/google/go-github/github/pulls.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" - "strings" - "time" -) - -// PullRequestsService handles communication with the pull request related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/pulls/ -type PullRequestsService service - -// PullRequest represents a GitHub pull request on a repository. 
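To round out the project-card methods above, an illustrative sketch that files a note card in a column and moves it to the top; the column ID and note text are placeholders.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// fileCard adds a note card to columnID and moves it to the top of the column.
func fileCard(ctx context.Context, client *github.Client, columnID int64, note string) error {
	card, _, err := client.Projects.CreateProjectCard(ctx, columnID, &github.ProjectCardOptions{
		Note: note,
	})
	if err != nil {
		return err
	}

	_, err = client.Projects.MoveProjectCard(ctx, card.GetID(), &github.ProjectCardMoveOptions{
		Position: "top",
	})
	return err
}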
-type PullRequest struct { - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - MergedAt *time.Time `json:"merged_at,omitempty"` - Labels []*Label `json:"labels,omitempty"` - User *User `json:"user,omitempty"` - Merged *bool `json:"merged,omitempty"` - Mergeable *bool `json:"mergeable,omitempty"` - MergeableState *string `json:"mergeable_state,omitempty"` - MergedBy *User `json:"merged_by,omitempty"` - MergeCommitSHA *string `json:"merge_commit_sha,omitempty"` - Comments *int `json:"comments,omitempty"` - Commits *int `json:"commits,omitempty"` - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - ChangedFiles *int `json:"changed_files,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` - CommitsURL *string `json:"commits_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - ReviewCommentsURL *string `json:"review_comments_url,omitempty"` - ReviewCommentURL *string `json:"review_comment_url,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Assignees []*User `json:"assignees,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` - AuthorAssociation *string `json:"author_association,omitempty"` - NodeID *string `json:"node_id,omitempty"` - RequestedReviewers []*User `json:"requested_reviewers,omitempty"` - - // RequestedTeams is populated as part of the PullRequestEvent. - // See, https://developer.github.com/v3/activity/events/types/#pullrequestevent for an example. - RequestedTeams []*Team `json:"requested_teams,omitempty"` - - Links *PRLinks `json:"_links,omitempty"` - Head *PullRequestBranch `json:"head,omitempty"` - Base *PullRequestBranch `json:"base,omitempty"` - - // ActiveLockReason is populated only when LockReason is provided while locking the pull request. - // Possible values are: "off-topic", "too heated", "resolved", and "spam". - ActiveLockReason *string `json:"active_lock_reason,omitempty"` -} - -func (p PullRequest) String() string { - return Stringify(p) -} - -// PRLink represents a single link object from Github pull request _links. -type PRLink struct { - HRef *string `json:"href,omitempty"` -} - -// PRLinks represents the "_links" object in a Github pull request. -type PRLinks struct { - Self *PRLink `json:"self,omitempty"` - HTML *PRLink `json:"html,omitempty"` - Issue *PRLink `json:"issue,omitempty"` - Comments *PRLink `json:"comments,omitempty"` - ReviewComments *PRLink `json:"review_comments,omitempty"` - ReviewComment *PRLink `json:"review_comment,omitempty"` - Commits *PRLink `json:"commits,omitempty"` - Statuses *PRLink `json:"statuses,omitempty"` -} - -// PullRequestBranch represents a base or head branch in a GitHub pull request. 
-type PullRequestBranch struct { - Label *string `json:"label,omitempty"` - Ref *string `json:"ref,omitempty"` - SHA *string `json:"sha,omitempty"` - Repo *Repository `json:"repo,omitempty"` - User *User `json:"user,omitempty"` -} - -// PullRequestListOptions specifies the optional parameters to the -// PullRequestsService.List method. -type PullRequestListOptions struct { - // State filters pull requests based on their state. Possible values are: - // open, closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Head filters pull requests by head user and branch name in the format of: - // "user:ref-name". - Head string `url:"head,omitempty"` - - // Base filters pull requests by base branch name. - Base string `url:"base,omitempty"` - - // Sort specifies how to sort pull requests. Possible values are: created, - // updated, popularity, long-running. Default is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort pull requests. Possible values are: asc, desc. - // If Sort is "created" or not specified, Default is "desc", otherwise Default - // is "asc" - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// List the pull requests for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests -func (s *PullRequestsService) List(ctx context.Context, owner string, repo string, opt *PullRequestListOptions) ([]*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeLabelDescriptionSearchPreview, mediaTypeLockReasonPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var pulls []*PullRequest - resp, err := s.client.Do(ctx, req, &pulls) - if err != nil { - return nil, resp, err - } - - return pulls, resp, nil -} - -// Get a single pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#get-a-single-pull-request -func (s *PullRequestsService) Get(ctx context.Context, owner string, repo string, number int) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeLabelDescriptionSearchPreview, mediaTypeLockReasonPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - pull := new(PullRequest) - resp, err := s.client.Do(ctx, req, pull) - if err != nil { - return nil, resp, err - } - - return pull, resp, nil -} - -// GetRaw gets a single pull request in raw (diff or patch) format. 
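As a usage sketch for the listing options above, with owner and repo as placeholders; GetNumber and GetTitle are the library's generated accessors.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// printOpenPulls lists open pull requests against master, most recently updated first.
func printOpenPulls(ctx context.Context, client *github.Client, owner, repo string) error {
	pulls, _, err := client.PullRequests.List(ctx, owner, repo, &github.PullRequestListOptions{
		State:       "open",
		Base:        "master",
		Sort:        "updated",
		Direction:   "desc",
		ListOptions: github.ListOptions{PerPage: 50},
	})
	if err != nil {
		return err
	}
	for _, pr := range pulls {
		fmt.Printf("#%d %s\n", pr.GetNumber(), pr.GetTitle())
	}
	return nil
}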
-func (s *PullRequestsService) GetRaw(ctx context.Context, owner string, repo string, number int, opt RawOptions) (string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - switch opt.Type { - case Diff: - req.Header.Set("Accept", mediaTypeV3Diff) - case Patch: - req.Header.Set("Accept", mediaTypeV3Patch) - default: - return "", nil, fmt.Errorf("unsupported raw type %d", opt.Type) - } - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// NewPullRequest represents a new pull request to be created. -type NewPullRequest struct { - Title *string `json:"title,omitempty"` - Head *string `json:"head,omitempty"` - Base *string `json:"base,omitempty"` - Body *string `json:"body,omitempty"` - Issue *int `json:"issue,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` -} - -// Create a new pull request on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#create-a-pull-request -func (s *PullRequestsService) Create(ctx context.Context, owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - req, err := s.client.NewRequest("POST", u, pull) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - - p := new(PullRequest) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -type pullRequestUpdate struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - State *string `json:"state,omitempty"` - Base *string `json:"base,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` -} - -// Edit a pull request. -// pull must not be nil. -// -// The following fields are editable: Title, Body, State, Base.Ref and MaintainerCanModify. -// Base.Ref updates the base branch of the pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#update-a-pull-request -func (s *PullRequestsService) Edit(ctx context.Context, owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) { - if pull == nil { - return nil, nil, fmt.Errorf("pull must be provided") - } - - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - - update := &pullRequestUpdate{ - Title: pull.Title, - Body: pull.Body, - State: pull.State, - MaintainerCanModify: pull.MaintainerCanModify, - } - if pull.Base != nil { - update.Base = pull.Base.Ref - } - - req, err := s.client.NewRequest("PATCH", u, update) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeLabelDescriptionSearchPreview, mediaTypeLockReasonPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - p := new(PullRequest) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListCommits lists the commits in a pull request. 
-// -// GitHub API docs: https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request -func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var commits []*RepositoryCommit - resp, err := s.client.Do(ctx, req, &commits) - if err != nil { - return nil, resp, err - } - - return commits, resp, nil -} - -// ListFiles lists the files in a pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests-files -func (s *PullRequestsService) ListFiles(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*CommitFile, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var commitFiles []*CommitFile - resp, err := s.client.Do(ctx, req, &commitFiles) - if err != nil { - return nil, resp, err - } - - return commitFiles, resp, nil -} - -// IsMerged checks if a pull request has been merged. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged -func (s *PullRequestsService) IsMerged(ctx context.Context, owner string, repo string, number int) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - merged, err := parseBoolResponse(err) - return merged, resp, err -} - -// PullRequestMergeResult represents the result of merging a pull request. -type PullRequestMergeResult struct { - SHA *string `json:"sha,omitempty"` - Merged *bool `json:"merged,omitempty"` - Message *string `json:"message,omitempty"` -} - -// PullRequestOptions lets you define how a pull request will be merged. -type PullRequestOptions struct { - CommitTitle string // Extra detail to append to automatic commit message. (Optional.) - SHA string // SHA that pull request head must match to allow merge. (Optional.) - - // The merge method to use. Possible values include: "merge", "squash", and "rebase" with the default being merge. (Optional.) - MergeMethod string -} - -type pullRequestMergeRequest struct { - CommitMessage string `json:"commit_message"` - CommitTitle string `json:"commit_title,omitempty"` - MergeMethod string `json:"merge_method,omitempty"` - SHA string `json:"sha,omitempty"` -} - -// Merge a pull request (Merge Button™). -// commitMessage is the title for the automatic commit message. 
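Putting Create and ListFiles together, a hedged sketch with placeholder branch names and body text; GetFilename is the generated accessor on CommitFile, which is defined elsewhere in the library.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// openPullRequest opens a pull request and prints the files it touches.
func openPullRequest(ctx context.Context, client *github.Client, owner, repo string) error {
	pr, _, err := client.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{
		Title:               github.String("Update vendored dependencies"),
		Head:                github.String("update-deps"),
		Base:                github.String("master"),
		Body:                github.String("Regenerates the vendor directory."),
		MaintainerCanModify: github.Bool(true),
	})
	if err != nil {
		return err
	}

	files, _, err := client.PullRequests.ListFiles(ctx, owner, repo, pr.GetNumber(), nil)
	if err != nil {
		return err
	}
	for _, f := range files {
		fmt.Println(f.GetFilename())
	}
	return nil
}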
-// -// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade -func (s *PullRequestsService) Merge(ctx context.Context, owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - - pullRequestBody := &pullRequestMergeRequest{CommitMessage: commitMessage} - if options != nil { - pullRequestBody.CommitTitle = options.CommitTitle - pullRequestBody.MergeMethod = options.MergeMethod - pullRequestBody.SHA = options.SHA - } - req, err := s.client.NewRequest("PUT", u, pullRequestBody) - if err != nil { - return nil, nil, err - } - - mergeResult := new(PullRequestMergeResult) - resp, err := s.client.Do(ctx, req, mergeResult) - if err != nil { - return nil, resp, err - } - - return mergeResult, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/pulls_comments.go b/vendor/github.com/google/go-github/github/pulls_comments.go deleted file mode 100644 index f3067762566a..000000000000 --- a/vendor/github.com/google/go-github/github/pulls_comments.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// PullRequestComment represents a comment left on a pull request. -type PullRequestComment struct { - ID *int64 `json:"id,omitempty"` - InReplyTo *int64 `json:"in_reply_to_id,omitempty"` - Body *string `json:"body,omitempty"` - Path *string `json:"path,omitempty"` - DiffHunk *string `json:"diff_hunk,omitempty"` - PullRequestReviewID *int64 `json:"pull_request_review_id,omitempty"` - Position *int `json:"position,omitempty"` - OriginalPosition *int `json:"original_position,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - OriginalCommitID *string `json:"original_commit_id,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - // AuthorAssociation is the comment author's relationship to the pull request's repository. - // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". - AuthorAssociation *string `json:"author_association,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - PullRequestURL *string `json:"pull_request_url,omitempty"` -} - -func (p PullRequestComment) String() string { - return Stringify(p) -} - -// PullRequestListCommentsOptions specifies the optional parameters to the -// PullRequestsService.ListComments method. -type PullRequestListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. - Sort string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction string `url:"direction,omitempty"` - - // Since filters comments by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified pull request. Specifying a -// pull request number of 0 will return all comments on all pull requests for -// the repository. 
-// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request -func (s *PullRequestsService) ListComments(ctx context.Context, owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*PullRequestComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment fetches the specified pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#get-a-single-comment -func (s *PullRequestsService) GetComment(ctx context.Context, owner string, repo string, commentID int64) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - comment := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, nil -} - -// CreateComment creates a new comment on the specified pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#create-a-comment -func (s *PullRequestsService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CreateCommentInReplyTo creates a new comment as a reply to an existing pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#alternative-input -func (s *PullRequestsService) CreateCommentInReplyTo(ctx context.Context, owner string, repo string, number int, body string, commentID int64) (*PullRequestComment, *Response, error) { - comment := &struct { - Body string `json:"body,omitempty"` - InReplyTo int64 `json:"in_reply_to,omitempty"` - }{ - Body: body, - InReplyTo: commentID, - } - u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment updates a pull request comment. -// A non-nil comment.Body must be provided. Other comment fields should be left nil. 
-// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#edit-a-comment -func (s *PullRequestsService) EditComment(ctx context.Context, owner string, repo string, commentID int64, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#delete-a-comment -func (s *PullRequestsService) DeleteComment(ctx context.Context, owner string, repo string, commentID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/pulls_reviewers.go b/vendor/github.com/google/go-github/github/pulls_reviewers.go deleted file mode 100644 index a1d785315067..000000000000 --- a/vendor/github.com/google/go-github/github/pulls_reviewers.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ReviewersRequest specifies users and teams for a pull request review request. -type ReviewersRequest struct { - Reviewers []string `json:"reviewers,omitempty"` - TeamReviewers []string `json:"team_reviewers,omitempty"` -} - -// Reviewers represents reviewers of a pull request. -type Reviewers struct { - Users []*User `json:"users,omitempty"` - Teams []*Team `json:"teams,omitempty"` -} - -// RequestReviewers creates a review request for the provided reviewers for the specified pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#create-a-review-request -func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) - req, err := s.client.NewRequest("POST", u, &reviewers) - if err != nil { - return nil, nil, err - } - - r := new(PullRequest) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// ListReviewers lists reviewers whose reviews have been requested on the specified pull request. 
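An illustrative use of CreateComment for an inline review comment; the commit SHA, file path, and diff position are placeholders that would come from the pull request's diff.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// commentOnDiff leaves a review comment at a specific position in a file's diff.
func commentOnDiff(ctx context.Context, client *github.Client, owner, repo string, number int, commitSHA string) error {
	_, _, err := client.PullRequests.CreateComment(ctx, owner, repo, number, &github.PullRequestComment{
		Body:     github.String("Consider handling this error."),
		CommitID: github.String(commitSHA),
		Path:     github.String("pkg/util/util.go"),
		Position: github.Int(12),
	})
	return err
}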
-// -// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#list-review-requests -func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int, opt *ListOptions) (*Reviewers, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/requested_reviewers", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - reviewers := new(Reviewers) - resp, err := s.client.Do(ctx, req, reviewers) - if err != nil { - return nil, resp, err - } - - return reviewers, resp, nil -} - -// RemoveReviewers removes the review request for the provided reviewers for the specified pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#delete-a-review-request -func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, &reviewers) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go deleted file mode 100644 index 57d3c635e646..000000000000 --- a/vendor/github.com/google/go-github/github/pulls_reviews.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// PullRequestReview represents a review of a pull request. -type PullRequestReview struct { - ID *int64 `json:"id,omitempty"` - User *User `json:"user,omitempty"` - Body *string `json:"body,omitempty"` - SubmittedAt *time.Time `json:"submitted_at,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - PullRequestURL *string `json:"pull_request_url,omitempty"` - State *string `json:"state,omitempty"` -} - -func (p PullRequestReview) String() string { - return Stringify(p) -} - -// DraftReviewComment represents a comment part of the review. -type DraftReviewComment struct { - Path *string `json:"path,omitempty"` - Position *int `json:"position,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (c DraftReviewComment) String() string { - return Stringify(c) -} - -// PullRequestReviewRequest represents a request to create a review. -type PullRequestReviewRequest struct { - CommitID *string `json:"commit_id,omitempty"` - Body *string `json:"body,omitempty"` - Event *string `json:"event,omitempty"` - Comments []*DraftReviewComment `json:"comments,omitempty"` -} - -func (r PullRequestReviewRequest) String() string { - return Stringify(r) -} - -// PullRequestReviewDismissalRequest represents a request to dismiss a review. -type PullRequestReviewDismissalRequest struct { - Message *string `json:"message,omitempty"` -} - -func (r PullRequestReviewDismissalRequest) String() string { - return Stringify(r) -} - -// ListReviews lists all reviews on the specified pull request. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. 
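A short sketch of the review-request calls above; the reviewer login and team slug are placeholders.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// askForReview requests reviews from a user and a team on the given pull request.
func askForReview(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	_, _, err := client.PullRequests.RequestReviewers(ctx, owner, repo, number, github.ReviewersRequest{
		Reviewers:     []string{"octocat"},
		TeamReviewers: []string{"release-team"},
	})
	return err
}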
-// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request -func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var reviews []*PullRequestReview - resp, err := s.client.Do(ctx, req, &reviews) - if err != nil { - return nil, resp, err - } - - return reviews, resp, nil -} - -// GetReview fetches the specified pull request review. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. -// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-a-single-review -func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - review := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, review) - if err != nil { - return nil, resp, err - } - - return review, resp, nil -} - -// DeletePendingReview deletes the specified pull request pending review. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. -// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#delete-a-pending-review -func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, nil, err - } - - review := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, review) - if err != nil { - return nil, resp, err - } - - return review, resp, nil -} - -// ListReviewComments lists all the comments for the specified review. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. 
-// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-comments-for-a-single-review -func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number int, reviewID int64, opt *ListOptions) ([]*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/comments", owner, repo, number, reviewID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*PullRequestComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// CreateReview creates a new review on the specified pull request. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. -// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#create-a-pull-request-review -func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo string, number int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) - - req, err := s.client.NewRequest("POST", u, review) - if err != nil { - return nil, nil, err - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// SubmitReview submits a specified review on the specified pull request. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. -// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#submit-a-pull-request-review -func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/events", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("POST", u, review) - if err != nil { - return nil, nil, err - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DismissReview dismisses a specified review on the specified pull request. -// -// TODO: Follow up with GitHub support about an issue with this method's -// returned error format and remove this comment once it's fixed. 
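A hedged sketch of CreateReview that approves a pull request with one inline draft comment; the path, position, and messages are placeholders.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// approve submits an approving review with a single inline comment.
func approve(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	_, _, err := client.PullRequests.CreateReview(ctx, owner, repo, number, &github.PullRequestReviewRequest{
		Body:  github.String("LGTM"),
		Event: github.String("APPROVE"),
		Comments: []*github.DraftReviewComment{{
			Path:     github.String("main.go"),
			Position: github.Int(3),
			Body:     github.String("Nice cleanup."),
		}},
	})
	return err
}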
-// Read more about it here - https://github.com/google/go-github/issues/540 -// -// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review -func (s *PullRequestsService) DismissReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewDismissalRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/dismissals", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("PUT", u, review) - if err != nil { - return nil, nil, err - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/reactions.go b/vendor/github.com/google/go-github/github/reactions.go deleted file mode 100644 index ddc055cb1b4b..000000000000 --- a/vendor/github.com/google/go-github/github/reactions.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ReactionsService provides access to the reactions-related functions in the -// GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/reactions/ -type ReactionsService service - -// Reaction represents a GitHub reaction. -type Reaction struct { - // ID is the Reaction ID. - ID *int64 `json:"id,omitempty"` - User *User `json:"user,omitempty"` - NodeID *string `json:"node_id,omitempty"` - // Content is the type of reaction. - // Possible values are: - // "+1", "-1", "laugh", "confused", "heart", "hooray". - Content *string `json:"content,omitempty"` -} - -// Reactions represents a summary of GitHub reactions. -type Reactions struct { - TotalCount *int `json:"total_count,omitempty"` - PlusOne *int `json:"+1,omitempty"` - MinusOne *int `json:"-1,omitempty"` - Laugh *int `json:"laugh,omitempty"` - Confused *int `json:"confused,omitempty"` - Heart *int `json:"heart,omitempty"` - Hooray *int `json:"hooray,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (r Reaction) String() string { - return Stringify(r) -} - -// ListCommentReactions lists the reactions for a commit comment. -// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-commit-comment -func (s *ReactionsService) ListCommentReactions(ctx context.Context, owner, repo string, id int64, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateCommentReaction creates a reaction for a commit comment. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". 
-// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-commit-comment -func (s ReactionsService) CreateCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListIssueReactions lists the reactions for an issue. -// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue -func (s *ReactionsService) ListIssueReactions(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateIssueReaction creates a reaction for an issue. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". -// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue -func (s ReactionsService) CreateIssueReaction(ctx context.Context, owner, repo string, number int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListIssueCommentReactions lists the reactions for an issue comment. -// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment -func (s *ReactionsService) ListIssueCommentReactions(ctx context.Context, owner, repo string, id int64, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateIssueCommentReaction creates a reaction for an issue comment. 
-// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". -// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment -func (s ReactionsService) CreateIssueCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListPullRequestCommentReactions lists the reactions for a pull request review comment. -// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment -func (s *ReactionsService) ListPullRequestCommentReactions(ctx context.Context, owner, repo string, id int64, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreatePullRequestCommentReaction creates a reaction for a pull request review comment. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". -// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment -func (s ReactionsService) CreatePullRequestCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListTeamDiscussionReactions lists the reactions for a team discussion. 
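// Sketch of the ReactionsService calls above: add a "+1" to an issue, then
// list its reactions. Assumes the same imports and authenticated client as the
// earlier examples; owner, repo and the issue number are placeholders.
func reactToIssue(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	// Creating a reaction that already exists simply returns it with 200 OK.
	if _, _, err := client.Reactions.CreateIssueReaction(ctx, owner, repo, number, "+1"); err != nil {
		return err
	}

	reactions, _, err := client.Reactions.ListIssueReactions(ctx, owner, repo, number, nil)
	if err != nil {
		return err
	}
	for _, r := range reactions {
		fmt.Printf("%s by %s\n", r.GetContent(), r.GetUser().GetLogin())
	}
	return nil
}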
-// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-team-discussion -func (s *ReactionsService) ListTeamDiscussionReactions(ctx context.Context, teamID int64, discussionNumber int, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateTeamDiscussionReaction creates a reaction for a team discussion. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". -// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-team-discussion -func (s *ReactionsService) CreateTeamDiscussionReaction(ctx context.Context, teamID int64, discussionNumber int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListTeamDiscussionCommentReactions lists the reactions for a team discussion comment. -// -// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-team-discussion-comment -func (s *ReactionsService) ListTeamDiscussionCommentReactions(ctx context.Context, teamID int64, discussionNumber, commentNumber int, opt *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - - return m, resp, nil -} - -// CreateTeamDiscussionCommentReaction creates a reaction for a team discussion comment. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray". -// -// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-team-discussion-comment -func (s *ReactionsService) CreateTeamDiscussionCommentReaction(ctx context.Context, teamID int64, discussionNumber, commentNumber int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteReaction deletes a reaction. 
-// -// GitHub API docs: https://developer.github.com/v3/reaction/reactions/#delete-a-reaction-archive -func (s *ReactionsService) DeleteReaction(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("reactions/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go deleted file mode 100644 index 617c20db5450..000000000000 --- a/vendor/github.com/google/go-github/github/repos.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" -) - -// RepositoriesService handles communication with the repository related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/repos/ -type RepositoriesService service - -// Repository represents a GitHub repository. -type Repository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - MirrorURL *string `json:"mirror_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Language *string `json:"language,omitempty"` - Fork *bool `json:"fork,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - NetworkCount *int `json:"network_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - StargazersCount *int `json:"stargazers_count,omitempty"` - SubscribersCount *int `json:"subscribers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` - Size *int `json:"size,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - Parent *Repository `json:"parent,omitempty"` - Source *Repository `json:"source,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Permissions *map[string]bool `json:"permissions,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - Topics []string `json:"topics,omitempty"` - Archived *bool `json:"archived,omitempty"` - - // Only provided when using RepositoriesService.Get while in preview - License *License `json:"license,omitempty"` - - // Additional mutable fields when creating and editing a repository - Private *bool `json:"private,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - HasPages *bool 
`json:"has_pages,omitempty"` - HasProjects *bool `json:"has_projects,omitempty"` - HasDownloads *bool `json:"has_downloads,omitempty"` - LicenseTemplate *string `json:"license_template,omitempty"` - GitignoreTemplate *string `json:"gitignore_template,omitempty"` - - // Creating an organization repository. Required for non-owners. - TeamID *int64 `json:"team_id,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - ArchiveURL *string `json:"archive_url,omitempty"` - AssigneesURL *string `json:"assignees_url,omitempty"` - BlobsURL *string `json:"blobs_url,omitempty"` - BranchesURL *string `json:"branches_url,omitempty"` - CollaboratorsURL *string `json:"collaborators_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - CommitsURL *string `json:"commits_url,omitempty"` - CompareURL *string `json:"compare_url,omitempty"` - ContentsURL *string `json:"contents_url,omitempty"` - ContributorsURL *string `json:"contributors_url,omitempty"` - DeploymentsURL *string `json:"deployments_url,omitempty"` - DownloadsURL *string `json:"downloads_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ForksURL *string `json:"forks_url,omitempty"` - GitCommitsURL *string `json:"git_commits_url,omitempty"` - GitRefsURL *string `json:"git_refs_url,omitempty"` - GitTagsURL *string `json:"git_tags_url,omitempty"` - HooksURL *string `json:"hooks_url,omitempty"` - IssueCommentURL *string `json:"issue_comment_url,omitempty"` - IssueEventsURL *string `json:"issue_events_url,omitempty"` - IssuesURL *string `json:"issues_url,omitempty"` - KeysURL *string `json:"keys_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - LanguagesURL *string `json:"languages_url,omitempty"` - MergesURL *string `json:"merges_url,omitempty"` - MilestonesURL *string `json:"milestones_url,omitempty"` - NotificationsURL *string `json:"notifications_url,omitempty"` - PullsURL *string `json:"pulls_url,omitempty"` - ReleasesURL *string `json:"releases_url,omitempty"` - StargazersURL *string `json:"stargazers_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - SubscribersURL *string `json:"subscribers_url,omitempty"` - SubscriptionURL *string `json:"subscription_url,omitempty"` - TagsURL *string `json:"tags_url,omitempty"` - TreesURL *string `json:"trees_url,omitempty"` - TeamsURL *string `json:"teams_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` -} - -func (r Repository) String() string { - return Stringify(r) -} - -// RepositoryListOptions specifies the optional parameters to the -// RepositoriesService.List method. -type RepositoryListOptions struct { - // Visibility of repositories to list. Can be one of all, public, or private. - // Default: all - Visibility string `url:"visibility,omitempty"` - - // List repos of given affiliation[s]. - // Comma-separated list of values. Can include: - // * owner: Repositories that are owned by the authenticated user. - // * collaborator: Repositories that the user has been added to as a - // collaborator. - // * organization_member: Repositories that the user has access to through - // being a member of an organization. This includes every repository on - // every team that the user is on. - // Default: owner,collaborator,organization_member - Affiliation string `url:"affiliation,omitempty"` - - // Type of repositories to list. 
- // Can be one of all, owner, public, private, member. Default: all - // Will cause a 422 error if used in the same request as visibility or - // affiliation. - Type string `url:"type,omitempty"` - - // How to sort the repository list. Can be one of created, updated, pushed, - // full_name. Default: full_name - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Can be one of asc or desc. - // Default: when using full_name: asc; otherwise desc - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// List the repositories for a user. Passing the empty string will list -// repositories for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-user-repositories -func (s *RepositoriesService) List(ctx context.Context, user string, opt *RepositoryListOptions) ([]*Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/repos", user) - } else { - u = "user/repos" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryListByOrgOptions specifies the optional parameters to the -// RepositoriesService.ListByOrg method. -type RepositoryListByOrgOptions struct { - // Type of repositories to list. Possible values are: all, public, private, - // forks, sources, member. Default is "all". - Type string `url:"type,omitempty"` - - ListOptions -} - -// ListByOrg lists the repositories for an organization. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-organization-repositories -func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opt *RepositoryListByOrgOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("orgs/%v/repos", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryListAllOptions specifies the optional parameters to the -// RepositoriesService.ListAll method. -type RepositoryListAllOptions struct { - // ID of the last repository seen - Since int64 `url:"since,omitempty"` -} - -// ListAll lists all GitHub repositories in the order that they were created. 
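// Sketch of paging through RepositoriesService.ListByOrg with the embedded
// ListOptions and Response.NextPage, as the list methods above expect.
// Assumes the same imports and client as the earlier examples; the org name
// and page size are placeholders.
func listOrgRepos(ctx context.Context, client *github.Client, org string) ([]*github.Repository, error) {
	opt := &github.RepositoryListByOrgOptions{
		Type:        "sources",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	var all []*github.Repository
	for {
		repos, resp, err := client.Repositories.ListByOrg(ctx, org, opt)
		if err != nil {
			return nil, err
		}
		all = append(all, repos...)
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
	return all, nil
}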
-// -// GitHub API docs: https://developer.github.com/v3/repos/#list-all-public-repositories -func (s *RepositoriesService) ListAll(ctx context.Context, opt *RepositoryListAllOptions) ([]*Repository, *Response, error) { - u, err := addOptions("repositories", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// createRepoRequest is a subset of Repository and is used internally -// by Create to pass only the known fields for the endpoint. -// -// See https://github.com/google/go-github/issues/1014 for more -// information. -type createRepoRequest struct { - // Name is required when creating a repo. - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - - Private *bool `json:"private,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasProjects *bool `json:"has_projects,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - - // Creating an organization repository. Required for non-owners. - TeamID *int64 `json:"team_id,omitempty"` - - AutoInit *bool `json:"auto_init,omitempty"` - GitignoreTemplate *string `json:"gitignore_template,omitempty"` - LicenseTemplate *string `json:"license_template,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` -} - -// Create a new repository. If an organization is specified, the new -// repository will be created under that org. If the empty string is -// specified, it will be created for the authenticated user. -// -// Note that only a subset of the repo fields are used and repo must -// not be nil. -// -// GitHub API docs: https://developer.github.com/v3/repos/#create -func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repository) (*Repository, *Response, error) { - var u string - if org != "" { - u = fmt.Sprintf("orgs/%v/repos", org) - } else { - u = "user/repos" - } - - repoReq := &createRepoRequest{ - Name: repo.Name, - Description: repo.Description, - Homepage: repo.Homepage, - Private: repo.Private, - HasIssues: repo.HasIssues, - HasProjects: repo.HasProjects, - HasWiki: repo.HasWiki, - TeamID: repo.TeamID, - AutoInit: repo.AutoInit, - GitignoreTemplate: repo.GitignoreTemplate, - LicenseTemplate: repo.LicenseTemplate, - AllowSquashMerge: repo.AllowSquashMerge, - AllowMergeCommit: repo.AllowMergeCommit, - AllowRebaseMerge: repo.AllowRebaseMerge, - } - - req, err := s.client.NewRequest("POST", u, repoReq) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// Get fetches a repository. 
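// Sketch of RepositoriesService.Create as described above: only the fields
// forwarded by createRepoRequest are sent. Passing "" instead of an org name
// creates the repository for the authenticated user. Assumes the same client
// setup as the earlier examples; all field values are placeholders.
func createOrgRepo(ctx context.Context, client *github.Client, org string) (*github.Repository, error) {
	repo := &github.Repository{
		Name:        github.String("example-repo"),
		Description: github.String("Created through the go-github Repositories service."),
		Private:     github.Bool(true),
		AutoInit:    github.Bool(true),
	}
	created, _, err := client.Repositories.Create(ctx, org, repo)
	return created, err
}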
-// -// GitHub API docs: https://developer.github.com/v3/repos/#get -func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when the license support fully launches - // https://developer.github.com/v3/licenses/#get-a-repositorys-license - acceptHeaders := []string{mediaTypeCodesOfConductPreview, mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - repository := new(Repository) - resp, err := s.client.Do(ctx, req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, nil -} - -// GetCodeOfConduct gets the contents of a repository's code of conduct. -// -// GitHub API docs: https://developer.github.com/v3/codes_of_conduct/#get-the-contents-of-a-repositorys-code-of-conduct -func (s *RepositoriesService) GetCodeOfConduct(ctx context.Context, owner, repo string) (*CodeOfConduct, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/community/code_of_conduct", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - - coc := new(CodeOfConduct) - resp, err := s.client.Do(ctx, req, coc) - if err != nil { - return nil, resp, err - } - - return coc, resp, nil -} - -// GetByID fetches a repository. -// -// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id. -func (s *RepositoriesService) GetByID(ctx context.Context, id int64) (*Repository, *Response, error) { - u := fmt.Sprintf("repositories/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - repository := new(Repository) - resp, err := s.client.Do(ctx, req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, nil -} - -// Edit updates a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#edit -func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repository *Repository) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("PATCH", u, repository) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// Delete a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/#delete-a-repository -func (s *RepositoriesService) Delete(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Contributor represents a repository contributor -type Contributor struct { - Login *string `json:"login,omitempty"` - ID *int64 `json:"id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - Contributions *int `json:"contributions,omitempty"` -} - -// ListContributorsOptions specifies the optional parameters to the -// RepositoriesService.ListContributors method. -type ListContributorsOptions struct { - // Include anonymous contributors in results or not - Anon string `url:"anon,omitempty"` - - ListOptions -} - -// ListContributors lists contributors for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-contributors -func (s *RepositoriesService) ListContributors(ctx context.Context, owner string, repository string, opt *ListContributorsOptions) ([]*Contributor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var contributor []*Contributor - resp, err := s.client.Do(ctx, req, &contributor) - if err != nil { - return nil, nil, err - } - - return contributor, resp, nil -} - -// ListLanguages lists languages for the specified repository. The returned map -// specifies the languages and the number of bytes of code written in that -// language. For example: -// -// { -// "C": 78769, -// "Python": 7769 -// } -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-languages -func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, repo string) (map[string]int, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/languages", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - languages := make(map[string]int) - resp, err := s.client.Do(ctx, req, &languages) - if err != nil { - return nil, resp, err - } - - return languages, resp, nil -} - -// ListTeams lists the teams for the specified repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/#list-teams -func (s *RepositoriesService) ListTeams(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/teams", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// RepositoryTag represents a repository tag. -type RepositoryTag struct { - Name *string `json:"name,omitempty"` - Commit *Commit `json:"commit,omitempty"` - ZipballURL *string `json:"zipball_url,omitempty"` - TarballURL *string `json:"tarball_url,omitempty"` -} - -// ListTags lists tags for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-tags -func (s *RepositoriesService) ListTags(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*RepositoryTag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/tags", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var tags []*RepositoryTag - resp, err := s.client.Do(ctx, req, &tags) - if err != nil { - return nil, resp, err - } - - return tags, resp, nil -} - -// Branch represents a repository branch -type Branch struct { - Name *string `json:"name,omitempty"` - Commit *RepositoryCommit `json:"commit,omitempty"` - Protected *bool `json:"protected,omitempty"` -} - -// Protection represents a repository branch's protection. -type Protection struct { - RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` - RequiredPullRequestReviews *PullRequestReviewsEnforcement `json:"required_pull_request_reviews"` - EnforceAdmins *AdminEnforcement `json:"enforce_admins"` - Restrictions *BranchRestrictions `json:"restrictions"` -} - -// ProtectionRequest represents a request to create/edit a branch's protection. -type ProtectionRequest struct { - RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` - RequiredPullRequestReviews *PullRequestReviewsEnforcementRequest `json:"required_pull_request_reviews"` - EnforceAdmins bool `json:"enforce_admins"` - Restrictions *BranchRestrictionsRequest `json:"restrictions"` -} - -// RequiredStatusChecks represents the protection status of a individual branch. -type RequiredStatusChecks struct { - // Require branches to be up to date before merging. (Required.) - Strict bool `json:"strict"` - // The list of status checks to require in order to merge into this - // branch. (Required; use []string{} instead of nil for empty list.) - Contexts []string `json:"contexts"` -} - -// RequiredStatusChecksRequest represents a request to edit a protected branch's status checks. -type RequiredStatusChecksRequest struct { - Strict *bool `json:"strict,omitempty"` - Contexts []string `json:"contexts,omitempty"` -} - -// PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. -type PullRequestReviewsEnforcement struct { - // Specifies which users and teams can dismiss pull request reviews. 
- DismissalRestrictions DismissalRestrictions `json:"dismissal_restrictions"` - // Specifies if approved reviews are dismissed automatically, when a new commit is pushed. - DismissStaleReviews bool `json:"dismiss_stale_reviews"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1-6. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` -} - -// PullRequestReviewsEnforcementRequest represents request to set the pull request review -// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above -// because the request structure is different from the response structure. -type PullRequestReviewsEnforcementRequest struct { - // Specifies which users and teams should be allowed to dismiss pull request reviews. - // User and team dismissal restrictions are only available for - // organization-owned repositories. Must be nil for personal repositories. - DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. (Required) - DismissStaleReviews bool `json:"dismiss_stale_reviews"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1-6. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` -} - -// PullRequestReviewsEnforcementUpdate represents request to patch the pull request review -// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above -// because the patch request does not require all fields to be initialized. -type PullRequestReviewsEnforcementUpdate struct { - // Specifies which users and teams can dismiss pull request reviews. Can be omitted. - DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. - DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews,omitempty"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1 - 6. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` -} - -// AdminEnforcement represents the configuration to enforce required status checks for repository administrators. -type AdminEnforcement struct { - URL *string `json:"url,omitempty"` - Enabled bool `json:"enabled"` -} - -// BranchRestrictions represents the restriction that only certain users or -// teams may push to a branch. -type BranchRestrictions struct { - // The list of user logins with push access. - Users []*User `json:"users"` - // The list of team slugs with push access. 
- Teams []*Team `json:"teams"` -} - -// BranchRestrictionsRequest represents the request to create/edit the -// restriction that only certain users or teams may push to a branch. It is -// separate from BranchRestrictions above because the request structure is -// different from the response structure. -type BranchRestrictionsRequest struct { - // The list of user logins with push access. (Required; use []string{} instead of nil for empty list.) - Users []string `json:"users"` - // The list of team slugs with push access. (Required; use []string{} instead of nil for empty list.) - Teams []string `json:"teams"` -} - -// DismissalRestrictions specifies which users and teams can dismiss pull request reviews. -type DismissalRestrictions struct { - // The list of users who can dimiss pull request reviews. - Users []*User `json:"users"` - // The list of teams which can dismiss pull request reviews. - Teams []*Team `json:"teams"` -} - -// DismissalRestrictionsRequest represents the request to create/edit the -// restriction to allows only specific users or teams to dimiss pull request reviews. It is -// separate from DismissalRestrictions above because the request structure is -// different from the response structure. -// Note: Both Users and Teams must be nil, or both must be non-nil. -type DismissalRestrictionsRequest struct { - // The list of user logins who can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.) - Users *[]string `json:"users,omitempty"` - // The list of team slugs which can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.) - Teams *[]string `json:"teams,omitempty"` -} - -// SignaturesProtectedBranch represents the protection status of an individual branch. -type SignaturesProtectedBranch struct { - URL *string `json:"url,omitempty"` - // Commits pushed to matching branches must have verified signatures. - Enabled *bool `json:"enabled,omitempty"` -} - -// ListBranches lists branches for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-branches -func (s *RepositoriesService) ListBranches(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - var branches []*Branch - resp, err := s.client.Do(ctx, req, &branches) - if err != nil { - return nil, resp, err - } - - return branches, resp, nil -} - -// GetBranch gets the specified branch for a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/#get-branch -func (s *RepositoriesService) GetBranch(ctx context.Context, owner, repo, branch string) (*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - b := new(Branch) - resp, err := s.client.Do(ctx, req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil -} - -// GetBranchProtection gets the protection of a given branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-branch-protection -func (s *RepositoriesService) GetBranchProtection(ctx context.Context, owner, repo, branch string) (*Protection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - p := new(Protection) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// GetRequiredStatusChecks gets the required status checks for a given protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-required-status-checks-of-protected-branch -func (s *RepositoriesService) GetRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*RequiredStatusChecks, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - p := new(RequiredStatusChecks) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListRequiredStatusChecksContexts lists the required status checks contexts for a given protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#list-required-status-checks-contexts-of-protected-branch -func (s *RepositoriesService) ListRequiredStatusChecksContexts(ctx context.Context, owner, repo, branch string) (contexts []string, resp *Response, err error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks/contexts", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - resp, err = s.client.Do(ctx, req, &contexts) - if err != nil { - return nil, resp, err - } - - return contexts, resp, nil -} - -// UpdateBranchProtection updates the protection of a given branch. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-branch-protection -func (s *RepositoriesService) UpdateBranchProtection(ctx context.Context, owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("PUT", u, preq) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - p := new(Protection) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// RemoveBranchProtection removes the protection of a given branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-branch-protection -func (s *RepositoriesService) RemoveBranchProtection(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - return s.client.Do(ctx, req, nil) -} - -// GetSignaturesProtectedBranch gets required signatures of protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-required-signatures-of-protected-branch -func (s *RepositoriesService) GetSignaturesProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - p := new(SignaturesProtectedBranch) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// RequireSignaturesOnProtectedBranch makes signed commits required on a protected branch. -// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#add-required-signatures-of-protected-branch -func (s *RepositoriesService) RequireSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - r := new(SignaturesProtectedBranch) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// OptionalSignaturesOnProtectedBranch removes required signed commits on a given branch. 
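// Sketch of UpdateBranchProtection above, built from the ProtectionRequest,
// RequiredStatusChecks and PullRequestReviewsEnforcementRequest types defined
// earlier in this file. Branch name, contexts and the review count are
// placeholders; Restrictions is left nil, which leaves push restrictions
// disabled. Assumes the same client setup as the earlier examples.
func protectBranch(ctx context.Context, client *github.Client, owner, repo, branch string) (*github.Protection, error) {
	preq := &github.ProtectionRequest{
		RequiredStatusChecks: &github.RequiredStatusChecks{
			Strict:   true,
			Contexts: []string{"continuous-integration/travis-ci"},
		},
		RequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{
			DismissStaleReviews:          true,
			RequiredApprovingReviewCount: 1,
		},
		EnforceAdmins: true,
		Restrictions:  nil,
	}
	p, _, err := client.Repositories.UpdateBranchProtection(ctx, owner, repo, branch, preq)
	return p, err
}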
-// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-required-signatures-of-protected-branch -func (s *RepositoriesService) OptionalSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - return s.client.Do(ctx, req, nil) -} - -// UpdateRequiredStatusChecks updates the required status checks for a given protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-required-status-checks-of-protected-branch -func (s *RepositoriesService) UpdateRequiredStatusChecks(ctx context.Context, owner, repo, branch string, sreq *RequiredStatusChecksRequest) (*RequiredStatusChecks, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) - req, err := s.client.NewRequest("PATCH", u, sreq) - if err != nil { - return nil, nil, err - } - - sc := new(RequiredStatusChecks) - resp, err := s.client.Do(ctx, req, sc) - if err != nil { - return nil, resp, err - } - - return sc, resp, nil -} - -// License gets the contents of a repository's license if one is detected. -// -// GitHub API docs: https://developer.github.com/v3/licenses/#get-the-contents-of-a-repositorys-license -func (s *RepositoriesService) License(ctx context.Context, owner, repo string) (*RepositoryLicense, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/license", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := &RepositoryLicense{} - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// GetPullRequestReviewEnforcement gets pull request review enforcement of a protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-pull-request-review-enforcement-of-protected-branch -func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdatePullRequestReviewEnforcement patches pull request review enforcement of a protected branch. -// It requires admin access and branch protection to be enabled. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch -func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string, patch *PullRequestReviewsEnforcementUpdate) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("PATCH", u, patch) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// DisableDismissalRestrictions disables dismissal restrictions of a protected branch. -// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch -func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - - data := struct { - R []interface{} `json:"dismissal_restrictions"` - }{[]interface{}{}} - - req, err := s.client.NewRequest("PATCH", u, data) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// RemovePullRequestReviewEnforcement removes pull request enforcement of a protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-pull-request-review-enforcement-of-protected-branch -func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - return s.client.Do(ctx, req, nil) -} - -// GetAdminEnforcement gets admin enforcement information of a protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-admin-enforcement-of-protected-branch -func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(AdminEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// AddAdminEnforcement adds admin enforcement to a protected branch. 
-// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#add-admin-enforcement-of-protected-branch -func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(AdminEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// RemoveAdminEnforcement removes admin enforcement from a protected branch. -// -// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-admin-enforcement-of-protected-branch -func (s *RepositoriesService) RemoveAdminEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - return s.client.Do(ctx, req, nil) -} - -// repositoryTopics represents a collection of repository topics. -type repositoryTopics struct { - Names []string `json:"names"` -} - -// ListAllTopics lists topics for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-all-topics-for-a-repository -func (s *RepositoriesService) ListAllTopics(ctx context.Context, owner, repo string) ([]string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - - topics := new(repositoryTopics) - resp, err := s.client.Do(ctx, req, topics) - if err != nil { - return nil, resp, err - } - - return topics.Names, resp, nil -} - -// ReplaceAllTopics replaces topics for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#replace-all-topics-for-a-repository -func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo string, topics []string) ([]string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) - t := &repositoryTopics{ - Names: topics, - } - if t.Names == nil { - t.Names = []string{} - } - req, err := s.client.NewRequest("PUT", u, t) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - - t = new(repositoryTopics) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t.Names, resp, nil -} - -// TransferRequest represents a request to transfer a repository. -type TransferRequest struct { - NewOwner string `json:"new_owner"` - TeamID []int64 `json:"team_ids,omitempty"` -} - -// Transfer transfers a repository from one account or organization to another. -// -// This method might return an *AcceptedError and a status code of -// 202. 
This is because this is the status that GitHub returns to signify that -// it has now scheduled the transfer of the repository in a background task. -// A follow up request, after a delay of a second or so, should result -// in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/#transfer-a-repository -func (s *RepositoriesService) Transfer(ctx context.Context, owner, repo string, transfer TransferRequest) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/transfer", owner, repo) - - req, err := s.client.NewRequest("POST", u, &transfer) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeRepositoryTransferPreview) - - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go deleted file mode 100644 index 757e9f39e44b..000000000000 --- a/vendor/github.com/google/go-github/github/repos_collaborators.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListCollaboratorsOptions specifies the optional parameters to the -// RepositoriesService.ListCollaborators method. -type ListCollaboratorsOptions struct { - // Affiliation specifies how collaborators should be filtered by their affiliation. - // Possible values are: - // outside - All outside collaborators of an organization-owned repository - // direct - All collaborators with permissions to an organization-owned repository, - // regardless of organization membership status - // all - All collaborators the authenticated user can see - // - // Default value is "all". - Affiliation string `url:"affiliation,omitempty"` - - ListOptions -} - -// ListCollaborators lists the GitHub users that have access to the repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#list-collaborators -func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opt *ListCollaboratorsOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// IsCollaborator checks whether the specified GitHub user has collaborator -// access to the given repo. -// Note: This will return false if the user is not a collaborator OR the user -// is not a GitHub user. 
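A short usage sketch for IsCollaborator as documented here: a false result covers both "not a collaborator" and "no such GitHub user", so it is not treated as an error. The owner, repo and username are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated is enough for public repositories

	// IsCollaborator returns false both for non-collaborators and for
	// usernames that do not exist, so check the error separately.
	isCollab, _, err := client.Repositories.IsCollaborator(ctx, "example-owner", "example-repo", "octocat")
	if err != nil {
		log.Fatal(err)
	}
	if isCollab {
		fmt.Println("octocat has collaborator access to example-owner/example-repo")
	} else {
		fmt.Println("octocat is not a collaborator (or is not a GitHub user)")
	}
}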
-// -// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#get -func (s *RepositoriesService) IsCollaborator(ctx context.Context, owner, repo, user string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - isCollab, err := parseBoolResponse(err) - return isCollab, resp, err -} - -// RepositoryPermissionLevel represents the permission level an organization -// member has for a given repository. -type RepositoryPermissionLevel struct { - // Possible values: "admin", "write", "read", "none" - Permission *string `json:"permission,omitempty"` - - User *User `json:"user,omitempty"` -} - -// GetPermissionLevel retrieves the specific permission level a collaborator has for a given repository. -// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#review-a-users-permission-level -func (s *RepositoriesService) GetPermissionLevel(ctx context.Context, owner, repo, user string) (*RepositoryPermissionLevel, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v/permission", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - rpl := new(RepositoryPermissionLevel) - resp, err := s.client.Do(ctx, req, rpl) - if err != nil { - return nil, resp, err - } - return rpl, resp, nil -} - -// RepositoryAddCollaboratorOptions specifies the optional parameters to the -// RepositoriesService.AddCollaborator method. -type RepositoryAddCollaboratorOptions struct { - // Permission specifies the permission to grant the user on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // - // Default value is "push". This option is only valid for organization-owned repositories. - Permission string `json:"permission,omitempty"` -} - -// AddCollaborator sends an invitation to the specified GitHub user -// to become a collaborator to the given repo. -// -// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator -func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveCollaborator removes the specified GitHub user as collaborator from the given repo. -// Note: Does not return error if a valid user that is not a collaborator is removed. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#remove-collaborator -func (s *RepositoriesService) RemoveCollaborator(ctx context.Context, owner, repo, user string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_comments.go b/vendor/github.com/google/go-github/github/repos_comments.go deleted file mode 100644 index fa2377d403da..000000000000 --- a/vendor/github.com/google/go-github/github/repos_comments.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// RepositoryComment represents a comment for a commit, file, or line in a repository. -type RepositoryComment struct { - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - - // User-mutable fields - Body *string `json:"body"` - // User-initialized fields - Path *string `json:"path,omitempty"` - Position *int `json:"position,omitempty"` -} - -func (r RepositoryComment) String() string { - return Stringify(r) -} - -// ListComments lists all the comments for the repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#list-commit-comments-for-a-repository -func (s *RepositoriesService) ListComments(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*RepositoryComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// ListCommitComments lists all the comments for a given commit SHA. -// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#list-comments-for-a-single-commit -func (s *RepositoriesService) ListCommitComments(ctx context.Context, owner, repo, sha string, opt *ListOptions) ([]*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*RepositoryComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// CreateComment creates a comment for the given commit. -// Note: GitHub allows for comments to be created for non-existing files and positions. 
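A sketch of creating a commit comment with CreateComment as described above. The commit SHA, file path and diff position are placeholders, and per the note GitHub accepts a path/position even if it does not exist in the commit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // a real call needs an authenticated http.Client

	comment := &github.RepositoryComment{
		Body:     github.String("Nice change!"),
		Path:     github.String("README.md"), // optional: attach the comment to a file...
		Position: github.Int(1),              // ...at a position in the diff
	}

	created, _, err := client.Repositories.CreateComment(ctx, "example-owner", "example-repo",
		"6dcb09b5b57875f334f61aebed695e2e4193db5e", comment)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created comment:", github.Stringify(created))
}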
-// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#create-a-commit-comment -func (s *RepositoriesService) CreateComment(ctx context.Context, owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// GetComment gets a single comment from a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#get-a-single-commit-comment -func (s *RepositoriesService) GetComment(ctx context.Context, owner, repo string, id int64) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// UpdateComment updates the body of a single comment. -// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#update-a-commit-comment -func (s *RepositoriesService) UpdateComment(ctx context.Context, owner, repo string, id int64, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a single comment from a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/comments/#delete-a-commit-comment -func (s *RepositoriesService) DeleteComment(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go deleted file mode 100644 index 0dcfe62cb474..000000000000 --- a/vendor/github.com/google/go-github/github/repos_commits.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" - "time" -) - -// RepositoryCommit represents a commit in a repo. -// Note that it's wrapping a Commit, so author/committer information is in two places, -// but contain different details about them: in RepositoryCommit "github details", in Commit - "git details". 
-type RepositoryCommit struct { - SHA *string `json:"sha,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Author *User `json:"author,omitempty"` - Committer *User `json:"committer,omitempty"` - Parents []Commit `json:"parents,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - - // Details about how many changes were made in this commit. Only filled in during GetCommit! - Stats *CommitStats `json:"stats,omitempty"` - // Details about which files, and how this commit touched. Only filled in during GetCommit! - Files []CommitFile `json:"files,omitempty"` -} - -func (r RepositoryCommit) String() string { - return Stringify(r) -} - -// CommitStats represents the number of additions / deletions from a file in a given RepositoryCommit or GistCommit. -type CommitStats struct { - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - Total *int `json:"total,omitempty"` -} - -func (c CommitStats) String() string { - return Stringify(c) -} - -// CommitFile represents a file modified in a commit. -type CommitFile struct { - SHA *string `json:"sha,omitempty"` - Filename *string `json:"filename,omitempty"` - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - Changes *int `json:"changes,omitempty"` - Status *string `json:"status,omitempty"` - Patch *string `json:"patch,omitempty"` - BlobURL *string `json:"blob_url,omitempty"` - RawURL *string `json:"raw_url,omitempty"` - ContentsURL *string `json:"contents_url,omitempty"` - PreviousFilename *string `json:"previous_filename,omitempty"` -} - -func (c CommitFile) String() string { - return Stringify(c) -} - -// CommitsComparison is the result of comparing two commits. -// See CompareCommits() for details. -type CommitsComparison struct { - BaseCommit *RepositoryCommit `json:"base_commit,omitempty"` - MergeBaseCommit *RepositoryCommit `json:"merge_base_commit,omitempty"` - - // Head can be 'behind' or 'ahead' - Status *string `json:"status,omitempty"` - AheadBy *int `json:"ahead_by,omitempty"` - BehindBy *int `json:"behind_by,omitempty"` - TotalCommits *int `json:"total_commits,omitempty"` - - Commits []RepositoryCommit `json:"commits,omitempty"` - - Files []CommitFile `json:"files,omitempty"` - - HTMLURL *string `json:"html_url,omitempty"` - PermalinkURL *string `json:"permalink_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` - URL *string `json:"url,omitempty"` // API URL. -} - -func (c CommitsComparison) String() string { - return Stringify(c) -} - -// CommitsListOptions specifies the optional parameters to the -// RepositoriesService.ListCommits method. -type CommitsListOptions struct { - // SHA or branch to start listing Commits from. - SHA string `url:"sha,omitempty"` - - // Path that should be touched by the returned Commits. - Path string `url:"path,omitempty"` - - // Author of by which to filter Commits. - Author string `url:"author,omitempty"` - - // Since when should Commits be included in the response. - Since time.Time `url:"since,omitempty"` - - // Until when should Commits be included in the response. - Until time.Time `url:"until,omitempty"` - - ListOptions -} - -// ListCommits lists the commits of a repository. 
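Since CommitsListOptions exposes several filters, here is a hedged sketch of listing recent commits by one author on one path; the names, path and 30-day window are arbitrary placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opt := &github.CommitsListOptions{
		Author:      "octocat",                     // only commits authored by this user
		Path:        "docs/",                       // only commits touching this path
		Since:       time.Now().AddDate(0, 0, -30), // only the last 30 days
		ListOptions: github.ListOptions{PerPage: 50},
	}

	commits, _, err := client.Repositories.ListCommits(ctx, "example-owner", "example-repo", opt)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range commits {
		if c.SHA != nil {
			fmt.Println(*c.SHA)
		}
	}
}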
-// -// GitHub API docs: https://developer.github.com/v3/repos/commits/#list -func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo string, opt *CommitsListOptions) ([]*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var commits []*RepositoryCommit - resp, err := s.client.Do(ctx, req, &commits) - if err != nil { - return nil, resp, err - } - - return commits, resp, nil -} - -// GetCommit fetches the specified commit, including all details about it. -// -// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-a-single-commit -// See also: https://developer.github.com/v3/git/commits/#get-a-single-commit provides the same functionality -func (s *RepositoriesService) GetCommit(ctx context.Context, owner, repo, sha string) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(ctx, req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, nil -} - -// GetCommitRaw fetches the specified commit in raw (diff or patch) format. -func (s *RepositoriesService) GetCommitRaw(ctx context.Context, owner string, repo string, sha string, opt RawOptions) (string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - switch opt.Type { - case Diff: - req.Header.Set("Accept", mediaTypeV3Diff) - case Patch: - req.Header.Set("Accept", mediaTypeV3Patch) - default: - return "", nil, fmt.Errorf("unsupported raw type %d", opt.Type) - } - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// GetCommitSHA1 gets the SHA-1 of a commit reference. If a last-known SHA1 is -// supplied and no new commits have occurred, a 304 Unmodified response is returned. -// -// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference -func (s *RepositoriesService) GetCommitSHA1(ctx context.Context, owner, repo, ref, lastSHA string) (string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, ref) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - if lastSHA != "" { - req.Header.Set("If-None-Match", `"`+lastSHA+`"`) - } - - req.Header.Set("Accept", mediaTypeV3SHA) - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// CompareCommits compares a range of commits with each other. 
-// todo: support media formats - https://github.com/google/go-github/issues/6 -// -// GitHub API docs: https://developer.github.com/v3/repos/commits/#compare-two-commits -func (s *RepositoriesService) CompareCommits(ctx context.Context, owner, repo string, base, head string) (*CommitsComparison, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, base, head) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comp := new(CommitsComparison) - resp, err := s.client.Do(ctx, req, comp) - if err != nil { - return nil, resp, err - } - - return comp, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_community_health.go b/vendor/github.com/google/go-github/github/repos_community_health.go deleted file mode 100644 index 73d1d573bfff..000000000000 --- a/vendor/github.com/google/go-github/github/repos_community_health.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// Metric represents the different fields for one file in community health files. -type Metric struct { - Name *string `json:"name"` - Key *string `json:"key"` - URL *string `json:"url"` - HTMLURL *string `json:"html_url"` -} - -// CommunityHealthFiles represents the different files in the community health metrics response. -type CommunityHealthFiles struct { - CodeOfConduct *Metric `json:"code_of_conduct"` - Contributing *Metric `json:"contributing"` - IssueTemplate *Metric `json:"issue_template"` - PullRequestTemplate *Metric `json:"pull_request_template"` - License *Metric `json:"license"` - Readme *Metric `json:"readme"` -} - -// CommunityHealthMetrics represents a response containing the community metrics of a repository. -type CommunityHealthMetrics struct { - HealthPercentage *int `json:"health_percentage"` - Files *CommunityHealthFiles `json:"files"` - UpdatedAt *time.Time `json:"updated_at"` -} - -// GetCommunityHealthMetrics retrieves all the community health metrics for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/community/#retrieve-community-health-metrics -func (s *RepositoriesService) GetCommunityHealthMetrics(ctx context.Context, owner, repo string) (*CommunityHealthMetrics, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/community/profile", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeRepositoryCommunityHealthMetricsPreview) - - metrics := &CommunityHealthMetrics{} - resp, err := s.client.Do(ctx, req, metrics) - if err != nil { - return nil, resp, err - } - - return metrics, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go deleted file mode 100644 index ffb56b90df2b..000000000000 --- a/vendor/github.com/google/go-github/github/repos_contents.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Repository contents API methods. 
-// GitHub API docs: https://developer.github.com/v3/repos/contents/ - -package github - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path" -) - -// RepositoryContent represents a file or directory in a github repository. -type RepositoryContent struct { - Type *string `json:"type,omitempty"` - Encoding *string `json:"encoding,omitempty"` - Size *int `json:"size,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - // Content contains the actual file content, which may be encoded. - // Callers should call GetContent which will decode the content if - // necessary. - Content *string `json:"content,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` -} - -// RepositoryContentResponse holds the parsed response from CreateFile, UpdateFile, and DeleteFile. -type RepositoryContentResponse struct { - Content *RepositoryContent `json:"content,omitempty"` - Commit `json:"commit,omitempty"` -} - -// RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile. -type RepositoryContentFileOptions struct { - Message *string `json:"message,omitempty"` - Content []byte `json:"content,omitempty"` // unencoded - SHA *string `json:"sha,omitempty"` - Branch *string `json:"branch,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` -} - -// RepositoryContentGetOptions represents an optional ref parameter, which can be a SHA, -// branch, or tag -type RepositoryContentGetOptions struct { - Ref string `url:"ref,omitempty"` -} - -// String converts RepositoryContent to a string. It's primarily for testing. -func (r RepositoryContent) String() string { - return Stringify(r) -} - -// GetContent returns the content of r, decoding it if necessary. -func (r *RepositoryContent) GetContent() (string, error) { - var encoding string - if r.Encoding != nil { - encoding = *r.Encoding - } - - switch encoding { - case "base64": - c, err := base64.StdEncoding.DecodeString(*r.Content) - return string(c), err - case "": - if r.Content == nil { - return "", nil - } - return *r.Content, nil - default: - return "", fmt.Errorf("unsupported content encoding: %v", encoding) - } -} - -// GetReadme gets the Readme file for the repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-the-readme -func (s *RepositoriesService) GetReadme(ctx context.Context, owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/readme", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - readme := new(RepositoryContent) - resp, err := s.client.Do(ctx, req, readme) - if err != nil { - return nil, resp, err - } - return readme, resp, nil -} - -// DownloadContents returns an io.ReadCloser that reads the contents of the -// specified file. This function will work with files of any size, as opposed -// to GetContents which is limited to 1 Mb files. It is the caller's -// responsibility to close the ReadCloser. 
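Because the comment above stresses that the caller owns the returned ReadCloser, a brief sketch: stream a file to stdout and close the reader when done. The repo and file path are placeholders.

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// DownloadContents streams the raw file, so it also works for files
	// larger than the 1 MB limit of GetContents.
	rc, err := client.Repositories.DownloadContents(ctx, "example-owner", "example-repo", "docs/manual.pdf", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // the caller is responsible for closing the body

	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}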
-func (s *RepositoriesService) DownloadContents(ctx context.Context, owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) { - dir := path.Dir(filepath) - filename := path.Base(filepath) - _, dirContents, _, err := s.GetContents(ctx, owner, repo, dir, opt) - if err != nil { - return nil, err - } - for _, contents := range dirContents { - if *contents.Name == filename { - if contents.DownloadURL == nil || *contents.DownloadURL == "" { - return nil, fmt.Errorf("No download link found for %s", filepath) - } - resp, err := s.client.client.Get(*contents.DownloadURL) - if err != nil { - return nil, err - } - return resp.Body, nil - } - } - return nil, fmt.Errorf("No file named %s found in %s", filename, dir) -} - -// GetContents can return either the metadata and content of a single file -// (when path references a file) or the metadata of all the files and/or -// subdirectories of a directory (when path references a directory). To make it -// easy to distinguish between both result types and to mimic the API as much -// as possible, both result types will be returned but only one will contain a -// value and the other will be nil. -// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-contents -func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) { - escapedPath := (&url.URL{Path: path}).String() - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath) - u, err = addOptions(u, opt) - if err != nil { - return nil, nil, nil, err - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, nil, err - } - var rawJSON json.RawMessage - resp, err = s.client.Do(ctx, req, &rawJSON) - if err != nil { - return nil, nil, resp, err - } - fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent) - if fileUnmarshalError == nil { - return fileContent, nil, resp, nil - } - directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent) - if directoryUnmarshalError == nil { - return nil, directoryContent, resp, nil - } - return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s", fileUnmarshalError, directoryUnmarshalError) -} - -// CreateFile creates a new file in a repository at the given path and returns -// the commit and file metadata. -// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#create-a-file -func (s *RepositoriesService) CreateFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - createResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, createResponse) - if err != nil { - return nil, resp, err - } - return createResponse, resp, nil -} - -// UpdateFile updates a file in a repository at the given path and returns the -// commit and file metadata. Requires the blob SHA of the file being updated. 
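A sketch of the flow implied here: UpdateFile needs the blob SHA of the file being replaced, which GetContents returns when the path points at a single file. The file path, branch and commit message are placeholders.

package main

import (
	"context"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes require an authenticated http.Client

	owner, repo, path := "example-owner", "example-repo", "docs/notes.md"

	// When path points at a file, only the first return value is populated.
	file, _, _, err := client.Repositories.GetContents(ctx, owner, repo, path, nil)
	if err != nil {
		log.Fatal(err)
	}
	if file == nil {
		log.Fatal("expected a file, got a directory listing")
	}

	opt := &github.RepositoryContentFileOptions{
		Message: github.String("Update notes"),
		Content: []byte("# Notes\n\nupdated contents\n"), // unencoded; the client handles encoding
		SHA:     file.SHA,                                // blob SHA of the version being replaced
		Branch:  github.String("master"),
	}
	if _, _, err := client.Repositories.UpdateFile(ctx, owner, repo, path, opt); err != nil {
		log.Fatal(err)
	}
}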
-// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#update-a-file -func (s *RepositoriesService) UpdateFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - updateResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, updateResponse) - if err != nil { - return nil, resp, err - } - return updateResponse, resp, nil -} - -// DeleteFile deletes a file from a repository and returns the commit. -// Requires the blob SHA of the file to be deleted. -// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#delete-a-file -func (s *RepositoriesService) DeleteFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("DELETE", u, opt) - if err != nil { - return nil, nil, err - } - deleteResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, deleteResponse) - if err != nil { - return nil, resp, err - } - return deleteResponse, resp, nil -} - -// archiveFormat is used to define the archive type when calling GetArchiveLink. -type archiveFormat string - -const ( - // Tarball specifies an archive in gzipped tar format. - Tarball archiveFormat = "tarball" - - // Zipball specifies an archive in zip format. - Zipball archiveFormat = "zipball" -) - -// GetArchiveLink returns an URL to download a tarball or zipball archive for a -// repository. The archiveFormat can be specified by either the github.Tarball -// or github.Zipball constant. -// -// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-archive-link -func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat) - if opt != nil && opt.Ref != "" { - u += fmt.Sprintf("/%s", opt.Ref) - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - var resp *http.Response - // Use http.DefaultTransport if no custom Transport is configured - req = withContext(ctx, req) - if s.client.client.Transport == nil { - resp, err = http.DefaultTransport.RoundTrip(req) - } else { - resp, err = s.client.client.Transport.RoundTrip(req) - } - if err != nil { - return nil, nil, err - } - resp.Body.Close() - if resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - parsedURL, err := url.Parse(resp.Header.Get("Location")) - return parsedURL, newResponse(resp), err -} diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go deleted file mode 100644 index 604632e91b66..000000000000 --- a/vendor/github.com/google/go-github/github/repos_deployments.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
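A sketch of consuming GetArchiveLink from repos_contents.go above: the method returns the Location URL from GitHub's 302 redirect without downloading the archive, so the caller fetches it separately. The ref, repo and output filename are placeholders.

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Resolve the redirect to a tarball URL for the given ref.
	u, _, err := client.Repositories.GetArchiveLink(ctx, "example-owner", "example-repo",
		github.Tarball, &github.RepositoryContentGetOptions{Ref: "master"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive URL:", u.String())

	// Download the archive from the returned URL with a plain HTTP client.
	resp, err := http.Get(u.String())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("repo.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
}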
- -package github - -import ( - "context" - "encoding/json" - "fmt" - "strings" -) - -// Deployment represents a deployment in a repo -type Deployment struct { - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - SHA *string `json:"sha,omitempty"` - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - Payload json.RawMessage `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// DeploymentRequest represents a deployment request -type DeploymentRequest struct { - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - AutoMerge *bool `json:"auto_merge,omitempty"` - RequiredContexts *[]string `json:"required_contexts,omitempty"` - Payload *string `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` - TransientEnvironment *bool `json:"transient_environment,omitempty"` - ProductionEnvironment *bool `json:"production_environment,omitempty"` -} - -// DeploymentsListOptions specifies the optional parameters to the -// RepositoriesService.ListDeployments method. -type DeploymentsListOptions struct { - // SHA of the Deployment. - SHA string `url:"sha,omitempty"` - - // List deployments for a given ref. - Ref string `url:"ref,omitempty"` - - // List deployments for a given task. - Task string `url:"task,omitempty"` - - // List deployments for a given environment. - Environment string `url:"environment,omitempty"` - - ListOptions -} - -// ListDeployments lists the deployments of a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployments -func (s *RepositoriesService) ListDeployments(ctx context.Context, owner, repo string, opt *DeploymentsListOptions) ([]*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var deployments []*Deployment - resp, err := s.client.Do(ctx, req, &deployments) - if err != nil { - return nil, resp, err - } - - return deployments, resp, nil -} - -// GetDeployment returns a single deployment of a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment -func (s *RepositoriesService) GetDeployment(ctx context.Context, owner, repo string, deploymentID int64) (*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deployment := new(Deployment) - resp, err := s.client.Do(ctx, req, deployment) - if err != nil { - return nil, resp, err - } - - return deployment, resp, nil -} - -// CreateDeployment creates a new deployment for a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment -func (s *RepositoriesService) CreateDeployment(ctx context.Context, owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(Deployment) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// DeploymentStatus represents the status of a -// particular deployment. -type DeploymentStatus struct { - ID *int64 `json:"id,omitempty"` - // State is the deployment state. - // Possible values are: "pending", "success", "failure", "error", "inactive". - State *string `json:"state,omitempty"` - Creator *User `json:"creator,omitempty"` - Description *string `json:"description,omitempty"` - TargetURL *string `json:"target_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - DeploymentURL *string `json:"deployment_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// DeploymentStatusRequest represents a deployment request -type DeploymentStatusRequest struct { - State *string `json:"state,omitempty"` - LogURL *string `json:"log_url,omitempty"` - Description *string `json:"description,omitempty"` - Environment *string `json:"environment,omitempty"` - EnvironmentURL *string `json:"environment_url,omitempty"` - AutoInactive *bool `json:"auto_inactive,omitempty"` -} - -// ListDeploymentStatuses lists the statuses of a given deployment of a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployment-statuses -func (s *RepositoriesService) ListDeploymentStatuses(ctx context.Context, owner, repo string, deployment int64, opt *ListOptions) ([]*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var statuses []*DeploymentStatus - resp, err := s.client.Do(ctx, req, &statuses) - if err != nil { - return nil, resp, err - } - - return statuses, resp, nil -} - -// GetDeploymentStatus returns a single deployment status of a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment-status -func (s *RepositoriesService) GetDeploymentStatus(ctx context.Context, owner, repo string, deploymentID, deploymentStatusID int64) (*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses/%v", owner, repo, deploymentID, deploymentStatusID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(DeploymentStatus) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateDeploymentStatus creates a new status for a deployment. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment-status -func (s *RepositoriesService) CreateDeploymentStatus(ctx context.Context, owner, repo string, deployment int64, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(DeploymentStatus) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go deleted file mode 100644 index bfff27bb9512..000000000000 --- a/vendor/github.com/google/go-github/github/repos_forks.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - - "encoding/json" -) - -// RepositoryListForksOptions specifies the optional parameters to the -// RepositoriesService.ListForks method. -type RepositoryListForksOptions struct { - // How to sort the forks list. Possible values are: newest, oldest, - // watchers. Default is "newest". - Sort string `url:"sort,omitempty"` - - ListOptions -} - -// ListForks lists the forks of the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/forks/#list-forks -func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string, opt *RepositoryListForksOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when topics API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryCreateForkOptions specifies the optional parameters to the -// RepositoriesService.CreateFork method. -type RepositoryCreateForkOptions struct { - // The organization to fork the repository into. - Organization string `url:"organization,omitempty"` -} - -// CreateFork creates a fork of the specified repository. -// -// This method might return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing creating the fork in a background task. In this event, -// the Repository value will be returned, which includes the details about the pending fork. 
-// A follow up request, after a delay of a second or so, should result -// in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/forks/#create-a-fork -func (s *RepositoriesService) CreateFork(ctx context.Context, owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - fork := new(Repository) - resp, err := s.client.Do(ctx, req, fork) - if err != nil { - // Persist AcceptedError's metadata to the Repository object. - if aerr, ok := err.(*AcceptedError); ok { - if err := json.Unmarshal(aerr.Raw, fork); err != nil { - return fork, resp, err - } - - return fork, resp, err - } - return nil, resp, err - } - - return fork, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_hooks.go b/vendor/github.com/google/go-github/github/repos_hooks.go deleted file mode 100644 index 56374b3ec145..000000000000 --- a/vendor/github.com/google/go-github/github/repos_hooks.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// WebHookPayload represents the data that is received from GitHub when a push -// event hook is triggered. The format of these payloads pre-date most of the -// GitHub v3 API, so there are lots of minor incompatibilities with the types -// defined in the rest of the API. Therefore, several types are duplicated -// here to account for these differences. -// -// GitHub API docs: https://help.github.com/articles/post-receive-hooks -type WebHookPayload struct { - After *string `json:"after,omitempty"` - Before *string `json:"before,omitempty"` - Commits []WebHookCommit `json:"commits,omitempty"` - Compare *string `json:"compare,omitempty"` - Created *bool `json:"created,omitempty"` - Deleted *bool `json:"deleted,omitempty"` - Forced *bool `json:"forced,omitempty"` - HeadCommit *WebHookCommit `json:"head_commit,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Ref *string `json:"ref,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -func (w WebHookPayload) String() string { - return Stringify(w) -} - -// WebHookCommit represents the commit variant we receive from GitHub in a -// WebHookPayload. -type WebHookCommit struct { - Added []string `json:"added,omitempty"` - Author *WebHookAuthor `json:"author,omitempty"` - Committer *WebHookAuthor `json:"committer,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - ID *string `json:"id,omitempty"` - Message *string `json:"message,omitempty"` - Modified []string `json:"modified,omitempty"` - Removed []string `json:"removed,omitempty"` - Timestamp *time.Time `json:"timestamp,omitempty"` -} - -func (w WebHookCommit) String() string { - return Stringify(w) -} - -// WebHookAuthor represents the author or committer of a commit, as specified -// in a WebHookCommit. The commit author may not correspond to a GitHub User. 
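Because CreateFork above can surface a 202 as *AcceptedError while still filling in the pending Repository, callers typically treat that error as "in progress" rather than failure. A hedged sketch with placeholder owner, repo and organization names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // forking requires an authenticated http.Client

	fork, _, err := client.Repositories.CreateFork(ctx, "example-owner", "example-repo",
		&github.RepositoryCreateForkOptions{Organization: "example-org"})
	if _, pending := err.(*github.AcceptedError); pending {
		// GitHub answered 202: the fork is being created in the background,
		// but the returned Repository already describes it.
		fmt.Println("fork scheduled:", github.Stringify(fork))
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fork ready:", github.Stringify(fork))
}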
-type WebHookAuthor struct { - Email *string `json:"email,omitempty"` - Name *string `json:"name,omitempty"` - Username *string `json:"username,omitempty"` -} - -func (w WebHookAuthor) String() string { - return Stringify(w) -} - -// Hook represents a GitHub (web and service) hook for a repository. -type Hook struct { - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - - // Only the following fields are used when creating a hook. - // Config is required. - Config map[string]interface{} `json:"config,omitempty"` - Events []string `json:"events,omitempty"` - Active *bool `json:"active,omitempty"` -} - -func (h Hook) String() string { - return Stringify(h) -} - -// createHookRequest is a subset of Hook and is used internally -// by CreateHook to pass only the known fields for the endpoint. -// -// See https://github.com/google/go-github/issues/1015 for more -// information. -type createHookRequest struct { - // Config is required. - Config map[string]interface{} `json:"config,omitempty"` - Events []string `json:"events,omitempty"` - Active *bool `json:"active,omitempty"` -} - -// CreateHook creates a Hook for the specified repository. -// Config is a required field. -// -// Note that only a subset of the hook fields are used and hook must -// not be nil. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#create-a-hook -func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - - hookReq := &createHookRequest{ - Events: hook.Events, - Active: hook.Active, - Config: hook.Config, - } - - req, err := s.client.NewRequest("POST", u, hookReq) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// ListHooks lists all Hooks for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#list -func (s *RepositoriesService) ListHooks(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*Hook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#get-single-hook -func (s *RepositoriesService) GetHook(ctx context.Context, owner, repo string, id int64) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// EditHook updates a specified Hook. 
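A sketch of creating a webhook with CreateHook above, which sends only Config, Events and Active. The payload URL, secret and event list are placeholders, and the config keys follow GitHub's standard webhook config (an assumption not spelled out in this file).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // hook management needs an authenticated http.Client

	hook := &github.Hook{
		Config: map[string]interface{}{ // required
			"url":          "https://ci.example.com/github-webhook",
			"content_type": "json",
			"secret":       "placeholder-secret",
		},
		Events: []string{"push", "pull_request"},
		Active: github.Bool(true),
	}

	created, _, err := client.Repositories.CreateHook(ctx, "example-owner", "example-repo", hook)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created hook:", github.Stringify(created))
}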
-// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#edit-a-hook -func (s *RepositoriesService) EditHook(ctx context.Context, owner, repo string, id int64, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// DeleteHook deletes a specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#delete-a-hook -func (s *RepositoriesService) DeleteHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#ping-a-hook -func (s *RepositoriesService) PingHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// TestHook triggers a test Hook by github. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#test-a-push-hook -func (s *RepositoriesService) TestHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_invitations.go b/vendor/github.com/google/go-github/github/repos_invitations.go deleted file mode 100644 index b88e9359f3ee..000000000000 --- a/vendor/github.com/google/go-github/github/repos_invitations.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryInvitation represents an invitation to collaborate on a repo. -type RepositoryInvitation struct { - ID *int64 `json:"id,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Invitee *User `json:"invitee,omitempty"` - Inviter *User `json:"inviter,omitempty"` - - // Permissions represents the permissions that the associated user will have - // on the repository. Possible values are: "read", "write", "admin". - Permissions *string `json:"permissions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` -} - -// ListInvitations lists all currently-open repository invitations. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-invitations-for-a-repository -func (s *RepositoriesService) ListInvitations(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/invitations", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - invites := []*RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, &invites) - if err != nil { - return nil, resp, err - } - - return invites, resp, nil -} - -// DeleteInvitation deletes a repository invitation. -// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#delete-a-repository-invitation -func (s *RepositoriesService) DeleteInvitation(ctx context.Context, owner, repo string, invitationID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// UpdateInvitation updates the permissions associated with a repository -// invitation. -// -// permissions represents the permissions that the associated user will have -// on the repository. Possible values are: "read", "write", "admin". -// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#update-a-repository-invitation -func (s *RepositoriesService) UpdateInvitation(ctx context.Context, owner, repo string, invitationID int64, permissions string) (*RepositoryInvitation, *Response, error) { - opts := &struct { - Permissions string `json:"permissions"` - }{Permissions: permissions} - u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - invite := &RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, invite) - if err != nil { - return nil, resp, err - } - - return invite, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_keys.go b/vendor/github.com/google/go-github/github/repos_keys.go deleted file mode 100644 index b484f8444632..000000000000 --- a/vendor/github.com/google/go-github/github/repos_keys.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// The Key type is defined in users_keys.go - -// ListKeys lists the deploy keys for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/keys/#list -func (s *RepositoriesService) ListKeys(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var keys []*Key - resp, err := s.client.Do(ctx, req, &keys) - if err != nil { - return nil, resp, err - } - - return keys, resp, nil -} - -// GetKey fetches a single deploy key. 
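Since ListKeys above takes the shared ListOptions, here is a sketch of paging through all deploy keys using the client's standard pagination; the page size is arbitrary and the repo name is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opt := &github.ListOptions{PerPage: 50}
	var all []*github.Key
	for {
		keys, resp, err := client.Repositories.ListKeys(ctx, "example-owner", "example-repo", opt)
		if err != nil {
			log.Fatal(err)
		}
		all = append(all, keys...)
		if resp.NextPage == 0 { // no more pages
			break
		}
		opt.Page = resp.NextPage
	}
	fmt.Println("deploy keys:", len(all))
}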
-// -// GitHub API docs: https://developer.github.com/v3/repos/keys/#get -func (s *RepositoriesService) GetKey(ctx context.Context, owner string, repo string, id int64) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// CreateKey adds a deploy key for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/keys/#create -func (s *RepositoriesService) CreateKey(ctx context.Context, owner string, repo string, key *Key) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(ctx, req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// EditKey edits a deploy key. -// -// GitHub API docs: https://developer.github.com/v3/repos/keys/#edit -func (s *RepositoriesService) EditKey(ctx context.Context, owner string, repo string, id int64, key *Key) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(ctx, req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteKey deletes a deploy key. -// -// GitHub API docs: https://developer.github.com/v3/repos/keys/#delete -func (s *RepositoriesService) DeleteKey(ctx context.Context, owner string, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_merging.go b/vendor/github.com/google/go-github/github/repos_merging.go deleted file mode 100644 index 04383c1ae32c..000000000000 --- a/vendor/github.com/google/go-github/github/repos_merging.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryMergeRequest represents a request to merge a branch in a -// repository. -type RepositoryMergeRequest struct { - Base *string `json:"base,omitempty"` - Head *string `json:"head,omitempty"` - CommitMessage *string `json:"commit_message,omitempty"` -} - -// Merge a branch in the specified repository. 
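A sketch of the merge endpoint described here: merge one branch into another with an optional commit message. The branch names are placeholders, and the call needs an authenticated client with push access.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // merging requires push access, so an authenticated http.Client

	req := &github.RepositoryMergeRequest{
		Base:          github.String("master"),        // branch to merge into
		Head:          github.String("feature/login"), // branch (or SHA) to merge from
		CommitMessage: github.String("Merge feature/login into master"),
	}

	commit, _, err := client.Repositories.Merge(ctx, "example-owner", "example-repo", req)
	if err != nil {
		log.Fatal(err)
	}
	if commit.SHA != nil {
		fmt.Println("merge commit:", *commit.SHA)
	}
}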
-// -// GitHub API docs: https://developer.github.com/v3/repos/merging/#perform-a-merge -func (s *RepositoriesService) Merge(ctx context.Context, owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/merges", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(ctx, req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go deleted file mode 100644 index 94a95f2b8ed8..000000000000 --- a/vendor/github.com/google/go-github/github/repos_pages.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Pages represents a GitHub Pages site configuration. -type Pages struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - CNAME *string `json:"cname,omitempty"` - Custom404 *bool `json:"custom_404,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` -} - -// PagesError represents a build error for a GitHub Pages site. -type PagesError struct { - Message *string `json:"message,omitempty"` -} - -// PagesBuild represents the build information for a GitHub Pages site. -type PagesBuild struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - Error *PagesError `json:"error,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Commit *string `json:"commit,omitempty"` - Duration *int `json:"duration,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -// GetPagesInfo fetches information about a GitHub Pages site. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site -func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo string) (*Pages, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePagesPreview) - - site := new(Pages) - resp, err := s.client.Do(ctx, req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil -} - -// ListPagesBuilds lists the builds for a GitHub Pages site. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds -func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string, opt *ListOptions) ([]*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pages []*PagesBuild - resp, err := s.client.Do(ctx, req, &pages) - if err != nil { - return nil, resp, err - } - - return pages, resp, nil -} - -// GetLatestPagesBuild fetches the latest build information for a GitHub pages site. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build -func (s *RepositoriesService) GetLatestPagesBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - build := new(PagesBuild) - resp, err := s.client.Do(ctx, req, build) - if err != nil { - return nil, resp, err - } - - return build, resp, nil -} - -// GetPageBuild fetches the specific build information for a GitHub pages site. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-a-specific-pages-build -func (s *RepositoriesService) GetPageBuild(ctx context.Context, owner, repo string, id int64) (*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - build := new(PagesBuild) - resp, err := s.client.Do(ctx, req, build) - if err != nil { - return nil, resp, err - } - - return build, resp, nil -} - -// RequestPageBuild requests a build of a GitHub Pages site without needing to push new commit. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build -func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePagesPreview) - - build := new(PagesBuild) - resp, err := s.client.Do(ctx, req, build) - if err != nil { - return nil, resp, err - } - - return build, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/github/repos_prereceive_hooks.go deleted file mode 100644 index cab09f74791c..000000000000 --- a/vendor/github.com/google/go-github/github/repos_prereceive_hooks.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// PreReceiveHook represents a GitHub pre-receive hook for a repository. -type PreReceiveHook struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Enforcement *string `json:"enforcement,omitempty"` - ConfigURL *string `json:"configuration_url,omitempty"` -} - -func (p PreReceiveHook) String() string { - return Stringify(p) -} - -// ListPreReceiveHooks lists all pre-receive hooks for the specified repository. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#list-pre-receive-hooks -func (s *RepositoriesService) ListPreReceiveHooks(ctx context.Context, owner, repo string, opt *ListOptions) ([]*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - var hooks []*PreReceiveHook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetPreReceiveHook returns a single specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#get-a-single-pre-receive-hook -func (s *RepositoriesService) GetPreReceiveHook(ctx context.Context, owner, repo string, id int64) (*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - h := new(PreReceiveHook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// UpdatePreReceiveHook updates a specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#update-pre-receive-hook-enforcement -func (s *RepositoriesService) UpdatePreReceiveHook(ctx context.Context, owner, repo string, id int64, hook *PreReceiveHook) (*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - h := new(PreReceiveHook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// DeletePreReceiveHook deletes a specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#remove-enforcement-overrides-for-a-pre-receive-hook -func (s *RepositoriesService) DeletePreReceiveHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_projects.go b/vendor/github.com/google/go-github/github/repos_projects.go deleted file mode 100644 index d6486d293c81..000000000000 --- a/vendor/github.com/google/go-github/github/repos_projects.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ProjectListOptions specifies the optional parameters to the -// OrganizationsService.ListProjects and RepositoriesService.ListProjects methods. -type ProjectListOptions struct { - // Indicates the state of the projects to return. Can be either open, closed, or all. Default: open - State string `url:"state,omitempty"` - - ListOptions -} - -// ListProjects lists the projects for a repo. 
-// -// GitHub API docs: https://developer.github.com/v3/projects/#list-repository-projects -func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo string, opt *ProjectListOptions) ([]*Project, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var projects []*Project - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// CreateProject creates a GitHub Project for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/projects/#create-a-repository-project -func (s *RepositoriesService) CreateProject(ctx context.Context, owner, repo string, opt *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) - req, err := s.client.NewRequest("POST", u, opt) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go deleted file mode 100644 index bf58743ebdc9..000000000000 --- a/vendor/github.com/google/go-github/github/repos_releases.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" - "io" - "mime" - "net/http" - "os" - "path/filepath" - "strings" -) - -// RepositoryRelease represents a GitHub release in a repository. -type RepositoryRelease struct { - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - - // The following fields are not used in CreateRelease or EditRelease: - ID *int64 `json:"id,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PublishedAt *Timestamp `json:"published_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - AssetsURL *string `json:"assets_url,omitempty"` - Assets []ReleaseAsset `json:"assets,omitempty"` - UploadURL *string `json:"upload_url,omitempty"` - ZipballURL *string `json:"zipball_url,omitempty"` - TarballURL *string `json:"tarball_url,omitempty"` - Author *User `json:"author,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (r RepositoryRelease) String() string { - return Stringify(r) -} - -// ReleaseAsset represents a GitHub release asset in a repository. 
-type ReleaseAsset struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Label *string `json:"label,omitempty"` - State *string `json:"state,omitempty"` - ContentType *string `json:"content_type,omitempty"` - Size *int `json:"size,omitempty"` - DownloadCount *int `json:"download_count,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - BrowserDownloadURL *string `json:"browser_download_url,omitempty"` - Uploader *User `json:"uploader,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (r ReleaseAsset) String() string { - return Stringify(r) -} - -// ListReleases lists the releases for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#list-releases-for-a-repository -func (s *RepositoriesService) ListReleases(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var releases []*RepositoryRelease - resp, err := s.client.Do(ctx, req, &releases) - if err != nil { - return nil, resp, err - } - return releases, resp, nil -} - -// GetRelease fetches a single release. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release -func (s *RepositoriesService) GetRelease(ctx context.Context, owner, repo string, id int64) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - return s.getSingleRelease(ctx, u) -} - -// GetLatestRelease fetches the latest published release for the repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-the-latest-release -func (s *RepositoriesService) GetLatestRelease(ctx context.Context, owner, repo string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo) - return s.getSingleRelease(ctx, u) -} - -// GetReleaseByTag fetches a release with the specified tag. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name -func (s *RepositoriesService) GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag) - return s.getSingleRelease(ctx, u) -} - -func (s *RepositoriesService) getSingleRelease(ctx context.Context, url string) (*RepositoryRelease, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - release := new(RepositoryRelease) - resp, err := s.client.Do(ctx, req, release) - if err != nil { - return nil, resp, err - } - return release, resp, nil -} - -// repositoryReleaseRequest is a subset of RepositoryRelease and -// is used internally by CreateRelease and EditRelease to pass -// only the known fields for these endpoints. -// -// See https://github.com/google/go-github/issues/992 for more -// information. 
-type repositoryReleaseRequest struct { - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` -} - -// CreateRelease adds a new release for a repository. -// -// Note that only a subset of the release fields are used. -// See RepositoryRelease for more information. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#create-a-release -func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) - - releaseReq := &repositoryReleaseRequest{ - TagName: release.TagName, - TargetCommitish: release.TargetCommitish, - Name: release.Name, - Body: release.Body, - Draft: release.Draft, - Prerelease: release.Prerelease, - } - - req, err := s.client.NewRequest("POST", u, releaseReq) - if err != nil { - return nil, nil, err - } - - r := new(RepositoryRelease) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - return r, resp, nil -} - -// EditRelease edits a repository release. -// -// Note that only a subset of the release fields are used. -// See RepositoryRelease for more information. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#edit-a-release -func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo string, id int64, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - - releaseReq := &repositoryReleaseRequest{ - TagName: release.TagName, - TargetCommitish: release.TargetCommitish, - Name: release.Name, - Body: release.Body, - Draft: release.Draft, - Prerelease: release.Prerelease, - } - - req, err := s.client.NewRequest("PATCH", u, releaseReq) - if err != nil { - return nil, nil, err - } - - r := new(RepositoryRelease) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - return r, resp, nil -} - -// DeleteRelease delete a single release from a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#delete-a-release -func (s *RepositoriesService) DeleteRelease(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// ListReleaseAssets lists the release's assets. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#list-assets-for-a-release -func (s *RepositoriesService) ListReleaseAssets(ctx context.Context, owner, repo string, id int64, opt *ListOptions) ([]*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var assets []*ReleaseAsset - resp, err := s.client.Do(ctx, req, &assets) - if err != nil { - return nil, resp, err - } - return assets, resp, nil -} - -// GetReleaseAsset fetches a single release asset. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release-asset -func (s *RepositoriesService) GetReleaseAsset(ctx context.Context, owner, repo string, id int64) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(ctx, req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, nil -} - -// DownloadReleaseAsset downloads a release asset or returns a redirect URL. -// -// DownloadReleaseAsset returns an io.ReadCloser that reads the contents of the -// specified release asset. It is the caller's responsibility to close the ReadCloser. -// If a redirect is returned, the redirect URL will be returned as a string instead -// of the io.ReadCloser. Exactly one of rc and redirectURL will be zero. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release-asset -func (s *RepositoriesService) DownloadReleaseAsset(ctx context.Context, owner, repo string, id int64) (rc io.ReadCloser, redirectURL string, err error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, "", err - } - req.Header.Set("Accept", defaultMediaType) - - s.client.clientMu.Lock() - defer s.client.clientMu.Unlock() - - var loc string - saveRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return errors.New("disable redirect") - } - defer func() { s.client.client.CheckRedirect = saveRedirect }() - - req = withContext(ctx, req) - resp, err := s.client.client.Do(req) - if err != nil { - if !strings.Contains(err.Error(), "disable redirect") { - return nil, "", err - } - return nil, loc, nil // Intentionally return no error with valid redirect URL. - } - - if err := CheckResponse(resp); err != nil { - resp.Body.Close() - return nil, "", err - } - - return resp.Body, "", nil -} - -// EditReleaseAsset edits a repository release asset. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#edit-a-release-asset -func (s *RepositoriesService) EditReleaseAsset(ctx context.Context, owner, repo string, id int64, release *ReleaseAsset) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, release) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(ctx, req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, nil -} - -// DeleteReleaseAsset delete a single release asset from a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#delete-a-release-asset -func (s *RepositoriesService) DeleteReleaseAsset(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// UploadReleaseAsset creates an asset by uploading a file into a release repository. -// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#upload-a-release-asset -func (s *RepositoriesService) UploadReleaseAsset(ctx context.Context, owner, repo string, id int64, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - stat, err := file.Stat() - if err != nil { - return nil, nil, err - } - if stat.IsDir() { - return nil, nil, errors.New("the asset to upload can't be a directory") - } - - mediaType := mime.TypeByExtension(filepath.Ext(file.Name())) - req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(ctx, req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_stats.go b/vendor/github.com/google/go-github/github/repos_stats.go deleted file mode 100644 index bb355aeadd3a..000000000000 --- a/vendor/github.com/google/go-github/github/repos_stats.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// ContributorStats represents a contributor to a repository and their -// weekly contributions to a given repo. -type ContributorStats struct { - Author *Contributor `json:"author,omitempty"` - Total *int `json:"total,omitempty"` - Weeks []WeeklyStats `json:"weeks,omitempty"` -} - -func (c ContributorStats) String() string { - return Stringify(c) -} - -// WeeklyStats represents the number of additions, deletions and commits -// a Contributor made in a given week. -type WeeklyStats struct { - Week *Timestamp `json:"w,omitempty"` - Additions *int `json:"a,omitempty"` - Deletions *int `json:"d,omitempty"` - Commits *int `json:"c,omitempty"` -} - -func (w WeeklyStats) String() string { - return Stringify(w) -} - -// ListContributorsStats gets a repo's contributor list with additions, -// deletions and commit counts. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/statistics/#contributors -func (s *RepositoriesService) ListContributorsStats(ctx context.Context, owner, repo string) ([]*ContributorStats, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var contributorStats []*ContributorStats - resp, err := s.client.Do(ctx, req, &contributorStats) - if err != nil { - return nil, resp, err - } - - return contributorStats, resp, nil -} - -// WeeklyCommitActivity represents the weekly commit activity for a repository. -// The days array is a group of commits per day, starting on Sunday. 
-type WeeklyCommitActivity struct { - Days []int `json:"days,omitempty"` - Total *int `json:"total,omitempty"` - Week *Timestamp `json:"week,omitempty"` -} - -func (w WeeklyCommitActivity) String() string { - return Stringify(w) -} - -// ListCommitActivity returns the last year of commit activity -// grouped by week. The days array is a group of commits per day, -// starting on Sunday. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/statistics/#commit-activity -func (s *RepositoriesService) ListCommitActivity(ctx context.Context, owner, repo string) ([]*WeeklyCommitActivity, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var weeklyCommitActivity []*WeeklyCommitActivity - resp, err := s.client.Do(ctx, req, &weeklyCommitActivity) - if err != nil { - return nil, resp, err - } - - return weeklyCommitActivity, resp, nil -} - -// ListCodeFrequency returns a weekly aggregate of the number of additions and -// deletions pushed to a repository. Returned WeeklyStats will contain -// additions and deletions, but not total commits. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/statistics/#code-frequency -func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo string) ([]*WeeklyStats, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var weeks [][]int - resp, err := s.client.Do(ctx, req, &weeks) - - // convert int slices into WeeklyStats - var stats []*WeeklyStats - for _, week := range weeks { - if len(week) != 3 { - continue - } - stat := &WeeklyStats{ - Week: &Timestamp{time.Unix(int64(week[0]), 0)}, - Additions: Int(week[1]), - Deletions: Int(week[2]), - } - stats = append(stats, stat) - } - - return stats, resp, err -} - -// RepositoryParticipation is the number of commits by everyone -// who has contributed to the repository (including the owner) -// as well as the number of commits by the owner themself. -type RepositoryParticipation struct { - All []int `json:"all,omitempty"` - Owner []int `json:"owner,omitempty"` -} - -func (r RepositoryParticipation) String() string { - return Stringify(r) -} - -// ListParticipation returns the total commit counts for the 'owner' -// and total commit counts in 'all'. 'all' is everyone combined, -// including the 'owner' in the last 52 weeks. If you’d like to get -// the commit counts for non-owners, you can subtract 'all' from 'owner'. -// -// The array order is oldest week (index 0) to most recent week. 
-// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/statistics/#participation -func (s *RepositoriesService) ListParticipation(ctx context.Context, owner, repo string) (*RepositoryParticipation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - participation := new(RepositoryParticipation) - resp, err := s.client.Do(ctx, req, participation) - if err != nil { - return nil, resp, err - } - - return participation, resp, nil -} - -// PunchCard represents the number of commits made during a given hour of a -// day of the week. -type PunchCard struct { - Day *int // Day of the week (0-6: =Sunday - Saturday). - Hour *int // Hour of day (0-23). - Commits *int // Number of commits. -} - -// ListPunchCard returns the number of commits per hour in each day. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://developer.github.com/v3/repos/statistics/#punch-card -func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo string) ([]*PunchCard, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var results [][]int - resp, err := s.client.Do(ctx, req, &results) - - // convert int slices into Punchcards - var cards []*PunchCard - for _, result := range results { - if len(result) != 3 { - continue - } - card := &PunchCard{ - Day: Int(result[0]), - Hour: Int(result[1]), - Commits: Int(result[2]), - } - cards = append(cards, card) - } - - return cards, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_statuses.go b/vendor/github.com/google/go-github/github/repos_statuses.go deleted file mode 100644 index f94fdc858b87..000000000000 --- a/vendor/github.com/google/go-github/github/repos_statuses.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// RepoStatus represents the status of a repository at a particular reference. -type RepoStatus struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - - // State is the current state of the repository. Possible values are: - // pending, success, error, or failure. - State *string `json:"state,omitempty"` - - // TargetURL is the URL of the page representing this status. It will be - // linked from the GitHub UI to allow users to see the source of the status. - TargetURL *string `json:"target_url,omitempty"` - - // Description is a short high level summary of the status. 
- Description *string `json:"description,omitempty"` - - // A string label to differentiate this status from the statuses of other systems. - Context *string `json:"context,omitempty"` - - Creator *User `json:"creator,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` -} - -func (r RepoStatus) String() string { - return Stringify(r) -} - -// ListStatuses lists the statuses of a repository at the specified -// reference. ref can be a SHA, a branch name, or a tag name. -// -// GitHub API docs: https://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref -func (s *RepositoriesService) ListStatuses(ctx context.Context, owner, repo, ref string, opt *ListOptions) ([]*RepoStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, ref) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var statuses []*RepoStatus - resp, err := s.client.Do(ctx, req, &statuses) - if err != nil { - return nil, resp, err - } - - return statuses, resp, nil -} - -// CreateStatus creates a new status for a repository at the specified -// reference. Ref can be a SHA, a branch name, or a tag name. -// -// GitHub API docs: https://developer.github.com/v3/repos/statuses/#create-a-status -func (s *RepositoriesService) CreateStatus(ctx context.Context, owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, ref) - req, err := s.client.NewRequest("POST", u, status) - if err != nil { - return nil, nil, err - } - - repoStatus := new(RepoStatus) - resp, err := s.client.Do(ctx, req, repoStatus) - if err != nil { - return nil, resp, err - } - - return repoStatus, resp, nil -} - -// CombinedStatus represents the combined status of a repository at a particular reference. -type CombinedStatus struct { - // State is the combined state of the repository. Possible values are: - // failure, pending, or success. - State *string `json:"state,omitempty"` - - Name *string `json:"name,omitempty"` - SHA *string `json:"sha,omitempty"` - TotalCount *int `json:"total_count,omitempty"` - Statuses []RepoStatus `json:"statuses,omitempty"` - - CommitURL *string `json:"commit_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` -} - -func (s CombinedStatus) String() string { - return Stringify(s) -} - -// GetCombinedStatus returns the combined status of a repository at the specified -// reference. ref can be a SHA, a branch name, or a tag name. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref -func (s *RepositoriesService) GetCombinedStatus(ctx context.Context, owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, ref) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - status := new(CombinedStatus) - resp, err := s.client.Do(ctx, req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/repos_traffic.go b/vendor/github.com/google/go-github/github/repos_traffic.go deleted file mode 100644 index fb1c97648a7b..000000000000 --- a/vendor/github.com/google/go-github/github/repos_traffic.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TrafficReferrer represent information about traffic from a referrer . -type TrafficReferrer struct { - Referrer *string `json:"referrer,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficPath represent information about the traffic on a path of the repo. -type TrafficPath struct { - Path *string `json:"path,omitempty"` - Title *string `json:"title,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficData represent information about a specific timestamp in views or clones list. -type TrafficData struct { - Timestamp *Timestamp `json:"timestamp,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficViews represent information about the number of views in the last 14 days. -type TrafficViews struct { - Views []*TrafficData `json:"views,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficClones represent information about the number of clones in the last 14 days. -type TrafficClones struct { - Clones []*TrafficData `json:"clones,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficBreakdownOptions specifies the parameters to methods that support breakdown per day or week. -// Can be one of: day, week. Default: day. -type TrafficBreakdownOptions struct { - Per string `url:"per,omitempty"` -} - -// ListTrafficReferrers list the top 10 referrers over the last 14 days. -// -// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-referrers -func (s *RepositoriesService) ListTrafficReferrers(ctx context.Context, owner, repo string) ([]*TrafficReferrer, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var trafficReferrers []*TrafficReferrer - resp, err := s.client.Do(ctx, req, &trafficReferrers) - if err != nil { - return nil, resp, err - } - - return trafficReferrers, resp, nil -} - -// ListTrafficPaths list the top 10 popular content over the last 14 days. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-paths -func (s *RepositoriesService) ListTrafficPaths(ctx context.Context, owner, repo string) ([]*TrafficPath, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var paths []*TrafficPath - resp, err := s.client.Do(ctx, req, &paths) - if err != nil { - return nil, resp, err - } - - return paths, resp, nil -} - -// ListTrafficViews get total number of views for the last 14 days and breaks it down either per day or week. -// -// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views -func (s *RepositoriesService) ListTrafficViews(ctx context.Context, owner, repo string, opt *TrafficBreakdownOptions) (*TrafficViews, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - trafficViews := new(TrafficViews) - resp, err := s.client.Do(ctx, req, &trafficViews) - if err != nil { - return nil, resp, err - } - - return trafficViews, resp, nil -} - -// ListTrafficClones get total number of clones for the last 14 days and breaks it down either per day or week for the last 14 days. -// -// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views -func (s *RepositoriesService) ListTrafficClones(ctx context.Context, owner, repo string, opt *TrafficBreakdownOptions) (*TrafficClones, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - trafficClones := new(TrafficClones) - resp, err := s.client.Do(ctx, req, &trafficClones) - if err != nil { - return nil, resp, err - } - - return trafficClones, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go deleted file mode 100644 index abaf5e1f0beb..000000000000 --- a/vendor/github.com/google/go-github/github/search.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/url" - "strconv" - "strings" - - qs "github.com/google/go-querystring/query" -) - -// SearchService provides access to the search related functions -// in the GitHub API. -// -// Each method takes a query string defining the search keywords and any search qualifiers. -// For example, when searching issues, the query "gopher is:issue language:go" will search -// for issues containing the word "gopher" in Go repositories. The method call -// opts := &github.SearchOptions{Sort: "created", Order: "asc"} -// cl.Search.Issues(ctx, "gopher is:issue language:go", opts) -// will search for such issues, sorting by creation date in ascending order -// (i.e., oldest first). -// -// GitHub API docs: https://developer.github.com/v3/search/ -type SearchService service - -// SearchOptions specifies optional parameters to the SearchService methods. -type SearchOptions struct { - // How to sort the search results. 
Possible values are: - // - for repositories: stars, fork, updated - // - for commits: author-date, committer-date - // - for code: indexed - // - for issues: comments, created, updated - // - for users: followers, repositories, joined - // - // Default is to sort by best match. - Sort string `url:"sort,omitempty"` - - // Sort order if sort parameter is provided. Possible values are: asc, - // desc. Default is desc. - Order string `url:"order,omitempty"` - - // Whether to retrieve text match metadata with a query - TextMatch bool `url:"-"` - - ListOptions -} - -// Common search parameters. -type searchParameters struct { - Query string - RepositoryID *int64 // Sent if non-nil. -} - -// RepositoriesSearchResult represents the result of a repositories search. -type RepositoriesSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Repositories []Repository `json:"items,omitempty"` -} - -// Repositories searches repositories via various criteria. -// -// GitHub API docs: https://developer.github.com/v3/search/#search-repositories -func (s *SearchService) Repositories(ctx context.Context, query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) { - result := new(RepositoriesSearchResult) - resp, err := s.search(ctx, "repositories", &searchParameters{Query: query}, opt, result) - return result, resp, err -} - -// CommitsSearchResult represents the result of a commits search. -type CommitsSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Commits []*CommitResult `json:"items,omitempty"` -} - -// CommitResult represents a commit object as returned in commit search endpoint response. -type CommitResult struct { - SHA *string `json:"sha,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Author *User `json:"author,omitempty"` - Committer *User `json:"committer,omitempty"` - Parents []*Commit `json:"parents,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - - Repository *Repository `json:"repository,omitempty"` - Score *float64 `json:"score,omitempty"` -} - -// Commits searches commits via various criteria. -// -// GitHub API docs: https://developer.github.com/v3/search/#search-commits -func (s *SearchService) Commits(ctx context.Context, query string, opt *SearchOptions) (*CommitsSearchResult, *Response, error) { - result := new(CommitsSearchResult) - resp, err := s.search(ctx, "commits", &searchParameters{Query: query}, opt, result) - return result, resp, err -} - -// IssuesSearchResult represents the result of an issues search. -type IssuesSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Issues []Issue `json:"items,omitempty"` -} - -// Issues searches issues via various criteria. -// -// GitHub API docs: https://developer.github.com/v3/search/#search-issues -func (s *SearchService) Issues(ctx context.Context, query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) { - result := new(IssuesSearchResult) - resp, err := s.search(ctx, "issues", &searchParameters{Query: query}, opt, result) - return result, resp, err -} - -// UsersSearchResult represents the result of a users search. 
-type UsersSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Users []User `json:"items,omitempty"` -} - -// Users searches users via various criteria. -// -// GitHub API docs: https://developer.github.com/v3/search/#search-users -func (s *SearchService) Users(ctx context.Context, query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) { - result := new(UsersSearchResult) - resp, err := s.search(ctx, "users", &searchParameters{Query: query}, opt, result) - return result, resp, err -} - -// Match represents a single text match. -type Match struct { - Text *string `json:"text,omitempty"` - Indices []int `json:"indices,omitempty"` -} - -// TextMatch represents a text match for a SearchResult -type TextMatch struct { - ObjectURL *string `json:"object_url,omitempty"` - ObjectType *string `json:"object_type,omitempty"` - Property *string `json:"property,omitempty"` - Fragment *string `json:"fragment,omitempty"` - Matches []Match `json:"matches,omitempty"` -} - -func (tm TextMatch) String() string { - return Stringify(tm) -} - -// CodeSearchResult represents the result of a code search. -type CodeSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - CodeResults []CodeResult `json:"items,omitempty"` -} - -// CodeResult represents a single search result. -type CodeResult struct { - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - SHA *string `json:"sha,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Repository *Repository `json:"repository,omitempty"` - TextMatches []TextMatch `json:"text_matches,omitempty"` -} - -func (c CodeResult) String() string { - return Stringify(c) -} - -// Code searches code via various criteria. -// -// GitHub API docs: https://developer.github.com/v3/search/#search-code -func (s *SearchService) Code(ctx context.Context, query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) { - result := new(CodeSearchResult) - resp, err := s.search(ctx, "code", &searchParameters{Query: query}, opt, result) - return result, resp, err -} - -// LabelsSearchResult represents the result of a code search. -type LabelsSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Labels []*LabelResult `json:"items,omitempty"` -} - -// LabelResult represents a single search result. -type LabelResult struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Color *string `json:"color,omitempty"` - Default *bool `json:"default,omitempty"` - Description *string `json:"description,omitempty"` - Score *float64 `json:"score,omitempty"` -} - -func (l LabelResult) String() string { - return Stringify(l) -} - -// Labels searches labels in the repository with ID repoID via various criteria. 
-// -// GitHub API docs: https://developer.github.com/v3/search/#search-labels -func (s *SearchService) Labels(ctx context.Context, repoID int64, query string, opt *SearchOptions) (*LabelsSearchResult, *Response, error) { - result := new(LabelsSearchResult) - resp, err := s.search(ctx, "labels", &searchParameters{RepositoryID: &repoID, Query: query}, opt, result) - return result, resp, err -} - -// Helper function that executes search queries against different -// GitHub search types (repositories, commits, code, issues, users, labels) -func (s *SearchService) search(ctx context.Context, searchType string, parameters *searchParameters, opt *SearchOptions, result interface{}) (*Response, error) { - params, err := qs.Values(opt) - if err != nil { - return nil, err - } - q := strings.Replace(parameters.Query, " ", "+", -1) - if parameters.RepositoryID != nil { - params.Set("repository_id", strconv.FormatInt(*parameters.RepositoryID, 10)) - } - query := "q=" + url.PathEscape(q) - if v := params.Encode(); v != "" { - query = query + "&" + v - } - u := fmt.Sprintf("search/%s?%s", searchType, query) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - switch { - case searchType == "commits": - // Accept header for search commits preview endpoint - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCommitSearchPreview) - case searchType == "repositories": - // Accept header for search repositories based on topics preview endpoint - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - case searchType == "labels": - // Accept header for search labels based on label description preview endpoint. - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeLabelDescriptionSearchPreview) - case opt != nil && opt.TextMatch: - // Accept header defaults to "application/vnd.github.v3+json" - // We change it here to fetch back text-match metadata - req.Header.Set("Accept", "application/vnd.github.v3.text-match+json") - } - - return s.client.Do(ctx, req, result) -} diff --git a/vendor/github.com/google/go-github/github/strings.go b/vendor/github.com/google/go-github/github/strings.go deleted file mode 100644 index 431e1cc6c1d3..000000000000 --- a/vendor/github.com/google/go-github/github/strings.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "fmt" - "io" - - "reflect" -) - -var timestampType = reflect.TypeOf(Timestamp{}) - -// Stringify attempts to create a reasonable string representation of types in -// the GitHub library. It does things like resolve pointers to their values -// and omits struct fields with nil values. -func Stringify(message interface{}) string { - var buf bytes.Buffer - v := reflect.ValueOf(message) - stringifyValue(&buf, v) - return buf.String() -} - -// stringifyValue was heavily inspired by the goprotobuf library. 
- -func stringifyValue(w io.Writer, val reflect.Value) { - if val.Kind() == reflect.Ptr && val.IsNil() { - w.Write([]byte("")) - return - } - - v := reflect.Indirect(val) - - switch v.Kind() { - case reflect.String: - fmt.Fprintf(w, `"%s"`, v) - case reflect.Slice: - w.Write([]byte{'['}) - for i := 0; i < v.Len(); i++ { - if i > 0 { - w.Write([]byte{' '}) - } - - stringifyValue(w, v.Index(i)) - } - - w.Write([]byte{']'}) - return - case reflect.Struct: - if v.Type().Name() != "" { - w.Write([]byte(v.Type().String())) - } - - // special handling of Timestamp values - if v.Type() == timestampType { - fmt.Fprintf(w, "{%s}", v.Interface()) - return - } - - w.Write([]byte{'{'}) - - var sep bool - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - if fv.Kind() == reflect.Ptr && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - continue - } - - if sep { - w.Write([]byte(", ")) - } else { - sep = true - } - - w.Write([]byte(v.Type().Field(i).Name)) - w.Write([]byte{':'}) - stringifyValue(w, fv) - } - - w.Write([]byte{'}'}) - default: - if v.CanInterface() { - fmt.Fprint(w, v.Interface()) - } - } -} diff --git a/vendor/github.com/google/go-github/github/teams.go b/vendor/github.com/google/go-github/github/teams.go deleted file mode 100644 index c3773e00e377..000000000000 --- a/vendor/github.com/google/go-github/github/teams.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" - "time" -) - -// TeamsService provides access to the team-related functions -// in the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/teams/ -type TeamsService service - -// Team represents a team within a GitHub organization. Teams are used to -// manage access to an organization's repositories. -type Team struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - URL *string `json:"url,omitempty"` - Slug *string `json:"slug,omitempty"` - - // Permission specifies the default permission for repositories owned by the team. - Permission *string `json:"permission,omitempty"` - - // Privacy identifies the level of privacy this team should have. - // Possible values are: - // secret - only visible to organization owners and members of this team - // closed - visible to all members of this organization - // Default is "secret". - Privacy *string `json:"privacy,omitempty"` - - MembersCount *int `json:"members_count,omitempty"` - ReposCount *int `json:"repos_count,omitempty"` - Organization *Organization `json:"organization,omitempty"` - MembersURL *string `json:"members_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` - Parent *Team `json:"parent,omitempty"` - - // LDAPDN is only available in GitHub Enterprise and when the team - // membership is synchronized with LDAP. - LDAPDN *string `json:"ldap_dn,omitempty"` -} - -func (t Team) String() string { - return Stringify(t) -} - -// Invitation represents a team member's invitation status. -type Invitation struct { - ID *int64 `json:"id,omitempty"` - Login *string `json:"login,omitempty"` - Email *string `json:"email,omitempty"` - // Role can be one of the values - 'direct_member', 'admin', 'billing_manager', 'hiring_manager', or 'reinstate'. 
- Role *string `json:"role,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - Inviter *User `json:"inviter,omitempty"` - TeamCount *int `json:"team_count,omitempty"` - InvitationTeamURL *string `json:"invitation_team_url,omitempty"` -} - -func (i Invitation) String() string { - return Stringify(i) -} - -// ListTeams lists all of the teams for an organization. -// -// GitHub API docs: https://developer.github.com/v3/teams/#list-teams -func (s *TeamsService) ListTeams(ctx context.Context, org string, opt *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// GetTeam fetches a team by ID. -// -// GitHub API docs: https://developer.github.com/v3/teams/#get-team -func (s *TeamsService) GetTeam(ctx context.Context, team int64) (*Team, *Response, error) { - u := fmt.Sprintf("teams/%v", team) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// NewTeam represents a team to be created or modified. -type NewTeam struct { - Name string `json:"name"` // Name of the team. (Required.) - Description *string `json:"description,omitempty"` - Maintainers []string `json:"maintainers,omitempty"` - RepoNames []string `json:"repo_names,omitempty"` - ParentTeamID *int64 `json:"parent_team_id,omitempty"` - - // Deprecated: Permission is deprecated when creating or editing a team in an org - // using the new GitHub permission model. It no longer identifies the - // permission a team has on its repos, but only specifies the default - // permission a repo is initially added with. Avoid confusion by - // specifying a permission value when calling AddTeamRepo. - Permission *string `json:"permission,omitempty"` - - // Privacy identifies the level of privacy this team should have. - // Possible values are: - // secret - only visible to organization owners and members of this team - // closed - visible to all members of this organization - // Default is "secret". - Privacy *string `json:"privacy,omitempty"` - - // LDAPDN may be used in GitHub Enterprise when the team membership - // is synchronized with LDAP. - LDAPDN *string `json:"ldap_dn,omitempty"` -} - -func (s NewTeam) String() string { - return Stringify(s) -} - -// CreateTeam creates a new team within an organization. -// -// GitHub API docs: https://developer.github.com/v3/teams/#create-team -func (s *TeamsService) CreateTeam(ctx context.Context, org string, team NewTeam) (*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - req, err := s.client.NewRequest("POST", u, team) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// EditTeam edits a team. -// -// GitHub API docs: https://developer.github.com/v3/teams/#edit-team -func (s *TeamsService) EditTeam(ctx context.Context, id int64, team NewTeam) (*Team, *Response, error) { - u := fmt.Sprintf("teams/%v", id) - req, err := s.client.NewRequest("PATCH", u, team) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// DeleteTeam deletes a team. -// -// GitHub API docs: https://developer.github.com/v3/teams/#delete-team -func (s *TeamsService) DeleteTeam(ctx context.Context, team int64) (*Response, error) { - u := fmt.Sprintf("teams/%v", team) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ListChildTeams lists child teams for a team. -// -// GitHub API docs: https://developer.github.com/v3/teams/#list-child-teams -func (s *TeamsService) ListChildTeams(ctx context.Context, teamID int64, opt *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("teams/%v/teams", teamID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// ListTeamRepos lists the repositories that the specified team has access to. -// -// GitHub API docs: https://developer.github.com/v3/teams/#list-team-repos -func (s *TeamsService) ListTeamRepos(ctx context.Context, team int64, opt *ListOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("teams/%v/repos", team) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when topics API fully launches. - headers := []string{mediaTypeTopicsPreview, mediaTypeNestedTeamsPreview} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// IsTeamRepo checks if a team manages the specified repository. If the -// repository is managed by team, a Repository is returned which includes the -// permissions team has for that repo. 
-// -// GitHub API docs: https://developer.github.com/v3/teams/#check-if-a-team-manages-a-repository -func (s *TeamsService) IsTeamRepo(ctx context.Context, team int64, owner string, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - headers := []string{mediaTypeOrgPermissionRepo, mediaTypeNestedTeamsPreview} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - repository := new(Repository) - resp, err := s.client.Do(ctx, req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, nil -} - -// TeamAddTeamRepoOptions specifies the optional parameters to the -// TeamsService.AddTeamRepo method. -type TeamAddTeamRepoOptions struct { - // Permission specifies the permission to grant the team on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // - // If not specified, the team's permission attribute will be used. - Permission string `json:"permission,omitempty"` -} - -// AddTeamRepo adds a repository to be managed by the specified team. The -// specified repository must be owned by the organization to which the team -// belongs, or a direct fork of a repository owned by the organization. -// -// GitHub API docs: https://developer.github.com/v3/teams/#add-team-repo -func (s *TeamsService) AddTeamRepo(ctx context.Context, team int64, owner string, repo string, opt *TeamAddTeamRepoOptions) (*Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveTeamRepo removes a repository from being managed by the specified -// team. Note that this does not delete the repository, it just removes it -// from the team. -// -// GitHub API docs: https://developer.github.com/v3/teams/#remove-team-repo -func (s *TeamsService) RemoveTeamRepo(ctx context.Context, team int64, owner string, repo string) (*Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListUserTeams lists a user's teams -// GitHub API docs: https://developer.github.com/v3/teams/#list-user-teams -func (s *TeamsService) ListUserTeams(ctx context.Context, opt *ListOptions) ([]*Team, *Response, error) { - u := "user/teams" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/github/teams_discussion_comments.go deleted file mode 100644 index a0206b9c92be..000000000000 --- a/vendor/github.com/google/go-github/github/teams_discussion_comments.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// DiscussionComment represents a GitHub dicussion in a team. -type DiscussionComment struct { - Author *User `json:"author,omitempty"` - Body *string `json:"body,omitempty"` - BodyHTML *string `json:"body_html,omitempty"` - BodyVersion *string `json:"body_version,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - LastEditedAt *Timestamp `json:"last_edited_at,omitempty"` - DiscussionURL *string `json:"discussion_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Number *int `json:"number,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` -} - -func (c DiscussionComment) String() string { - return Stringify(c) -} - -// DiscussionCommentListOptions specifies optional parameters to the -// TeamServices.ListComments method. -type DiscussionCommentListOptions struct { - // Sorts the discussion comments by the date they were created. - // Accepted values are asc and desc. Default is desc. - Direction string `url:"direction,omitempty"` -} - -// ListComments lists all comments on a team discussion. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussion_comments/#list-comments -func (s *TeamsService) ListComments(ctx context.Context, teamID int64, discussionNumber int, options *DiscussionCommentListOptions) ([]*DiscussionComment, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments", teamID, discussionNumber) - u, err := addOptions(u, options) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - var comments []*DiscussionComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment gets a specific comment on a team discussion. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussion_comments/#get-a-single-comment -func (s *TeamsService) GetComment(ctx context.Context, teamID int64, discussionNumber, commentNumber int) (*DiscussionComment, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v", teamID, discussionNumber, commentNumber) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - discussionComment := &DiscussionComment{} - resp, err := s.client.Do(ctx, req, discussionComment) - if err != nil { - return nil, resp, err - } - - return discussionComment, resp, nil -} - -// CreateComment creates a new discussion post on a team discussion. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussion_comments/#create-a-comment -func (s *TeamsService) CreateComment(ctx context.Context, teamID int64, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments", teamID, discsusionNumber) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - discussionComment := &DiscussionComment{} - resp, err := s.client.Do(ctx, req, discussionComment) - if err != nil { - return nil, resp, err - } - - return discussionComment, resp, nil -} - -// EditComment edits the body text of a discussion comment. -// Authenticated user must grant write:discussion scope. -// User is allowed to edit body of a comment only. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussion_comments/#edit-a-comment -func (s *TeamsService) EditComment(ctx context.Context, teamID int64, discussionNumber, commentNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v", teamID, discussionNumber, commentNumber) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - discussionComment := &DiscussionComment{} - resp, err := s.client.Do(ctx, req, discussionComment) - if err != nil { - return nil, resp, err - } - - return discussionComment, resp, nil -} - -// DeleteComment deletes a comment on a team discussion. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussion_comments/#delete-a-comment -func (s *TeamsService) DeleteComment(ctx context.Context, teamID int64, discussionNumber, commentNumber int) (*Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v", teamID, discussionNumber, commentNumber) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/teams_discussions.go b/vendor/github.com/google/go-github/github/teams_discussions.go deleted file mode 100644 index f491c9d1d6e9..000000000000 --- a/vendor/github.com/google/go-github/github/teams_discussions.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TeamDiscussion represents a GitHub dicussion in a team. 
-type TeamDiscussion struct { - Author *User `json:"author,omitempty"` - Body *string `json:"body,omitempty"` - BodyHTML *string `json:"body_html,omitempty"` - BodyVersion *string `json:"body_version,omitempty"` - CommentsCount *int `json:"comments_count,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - LastEditedAt *Timestamp `json:"last_edited_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Number *int `json:"number,omitempty"` - Pinned *bool `json:"pinned,omitempty"` - Private *bool `json:"private,omitempty"` - TeamURL *string `json:"team_url,omitempty"` - Title *string `json:"title,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` -} - -func (d TeamDiscussion) String() string { - return Stringify(d) -} - -// DiscussionListOptions specifies optional parameters to the -// TeamServices.ListDiscussions method. -type DiscussionListOptions struct { - // Sorts the discussion by the date they were created. - // Accepted values are asc and desc. Default is desc. - Direction string `url:"direction,omitempty"` -} - -// ListDiscussions lists all discussions on team's page. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussions/#list-discussions -func (s *TeamsService) ListDiscussions(ctx context.Context, teamID int64, options *DiscussionListOptions) ([]*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions", teamID) - u, err := addOptions(u, options) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - var teamDiscussions []*TeamDiscussion - resp, err := s.client.Do(ctx, req, &teamDiscussions) - if err != nil { - return nil, resp, err - } - - return teamDiscussions, resp, nil -} - -// GetDiscussion gets a specific discussion on a team's page. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussions/#get-a-single-discussion -func (s *TeamsService) GetDiscussion(ctx context.Context, teamID int64, discussionNumber int) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v", teamID, discussionNumber) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// CreateDiscussion creates a new discussion post on a team's page. -// Authenticated user must grant write:discussion scope. 
-// -// GitHub API docs: https://developer.github.com/v3/teams/discussions/#create-a-discussion -func (s *TeamsService) CreateDiscussion(ctx context.Context, teamID int64, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions", teamID) - req, err := s.client.NewRequest("POST", u, discussion) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// EditDiscussion edits the title and body text of a discussion post. -// Authenticated user must grant write:discussion scope. -// User is allowed to change Title and Body of a discussion only. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussions/#edit-a-discussion -func (s *TeamsService) EditDiscussion(ctx context.Context, teamID int64, discussionNumber int, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v", teamID, discussionNumber) - req, err := s.client.NewRequest("PATCH", u, discussion) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// DeleteDiscussion deletes a discussion from team's page. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://developer.github.com/v3/teams/discussions/#delete-a-discussion -func (s *TeamsService) DeleteDiscussion(ctx context.Context, teamID int64, discussionNumber int) (*Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v", teamID, discussionNumber) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTeamDiscussionsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/teams_members.go b/vendor/github.com/google/go-github/github/teams_members.go deleted file mode 100644 index d5cfa0dc7dd5..000000000000 --- a/vendor/github.com/google/go-github/github/teams_members.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TeamListTeamMembersOptions specifies the optional parameters to the -// TeamsService.ListTeamMembers method. -type TeamListTeamMembersOptions struct { - // Role filters members returned by their role in the team. Possible - // values are "all", "member", "maintainer". Default is "all". - Role string `url:"role,omitempty"` - - ListOptions -} - -// ListTeamMembers lists all of the users who are members of the specified -// team. 
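The TeamDiscussion type and the ListDiscussions/CreateDiscussion methods in the teams_discussions.go hunk above follow the same call pattern; a short, illustrative sketch is below. The team ID, title, body, and client wiring are placeholders, and github.String again comes from code outside this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // in practice, an authenticated client with write:discussion scope

	// Post a new discussion on team 42's page; Title and Body are the editable fields.
	d, _, err := client.Teams.CreateDiscussion(ctx, 42, github.TeamDiscussion{
		Title: github.String("Release checklist"),
		Body:  github.String("Items to confirm before tagging the next release."),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.String())

	// Newest-first is the default ordering; ask for oldest-first explicitly.
	discussions, _, err := client.Teams.ListDiscussions(ctx, 42, &github.DiscussionListOptions{Direction: "asc"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(discussions), "discussions")
}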
-// -// GitHub API docs: https://developer.github.com/v3/teams/members/#list-team-members -func (s *TeamsService) ListTeamMembers(ctx context.Context, team int64, opt *TeamListTeamMembersOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("teams/%v/members", team) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - var members []*User - resp, err := s.client.Do(ctx, req, &members) - if err != nil { - return nil, resp, err - } - - return members, resp, nil -} - -// IsTeamMember checks if a user is a member of the specified team. -// -// GitHub API docs: https://developer.github.com/v3/teams/members/#get-team-member -// -// Deprecated: This API has been marked as deprecated in the Github API docs, -// TeamsService.GetTeamMembership method should be used instead. -func (s *TeamsService) IsTeamMember(ctx context.Context, team int64, user string) (bool, *Response, error) { - u := fmt.Sprintf("teams/%v/members/%v", team, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// GetTeamMembership returns the membership status for a user in a team. -// -// GitHub API docs: https://developer.github.com/v3/teams/members/#get-team-membership -func (s *TeamsService) GetTeamMembership(ctx context.Context, team int64, user string) (*Membership, *Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeNestedTeamsPreview) - - t := new(Membership) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// TeamAddTeamMembershipOptions specifies the optional -// parameters to the TeamsService.AddTeamMembership method. -type TeamAddTeamMembershipOptions struct { - // Role specifies the role the user should have in the team. Possible - // values are: - // member - a normal member of the team - // maintainer - a team maintainer. Able to add/remove other team - // members, promote other team members to team - // maintainer, and edit the team’s name and description - // - // Default value is "member". - Role string `json:"role,omitempty"` -} - -// AddTeamMembership adds or invites a user to a team. -// -// In order to add a membership between a user and a team, the authenticated -// user must have 'admin' permissions to the team or be an owner of the -// organization that the team is associated with. -// -// If the user is already a part of the team's organization (meaning they're on -// at least one other team in the organization), this endpoint will add the -// user to the team. -// -// If the user is completely unaffiliated with the team's organization (meaning -// they're on none of the organization's teams), this endpoint will send an -// invitation to the user via email. This newly-created membership will be in -// the "pending" state until the user accepts the invitation, at which point -// the membership will transition to the "active" state and the user will be -// added as a member of the team. 
-// -// GitHub API docs: https://developer.github.com/v3/teams/members/#add-or-update-team-membership -func (s *TeamsService) AddTeamMembership(ctx context.Context, team int64, user string, opt *TeamAddTeamMembershipOptions) (*Membership, *Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - - t := new(Membership) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// RemoveTeamMembership removes a user from a team. -// -// GitHub API docs: https://developer.github.com/v3/teams/members/#remove-team-membership -func (s *TeamsService) RemoveTeamMembership(ctx context.Context, team int64, user string) (*Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListPendingTeamInvitations get pending invitaion list in team. -// Warning: The API may change without advance notice during the preview period. -// Preview features are not supported for production use. -// -// GitHub API docs: https://developer.github.com/v3/teams/members/#list-pending-team-invitations -func (s *TeamsService) ListPendingTeamInvitations(ctx context.Context, team int64, opt *ListOptions) ([]*Invitation, *Response, error) { - u := fmt.Sprintf("teams/%v/invitations", team) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pendingInvitations []*Invitation - resp, err := s.client.Do(ctx, req, &pendingInvitations) - if err != nil { - return nil, resp, err - } - - return pendingInvitations, resp, nil -} diff --git a/vendor/github.com/google/go-github/github/timestamp.go b/vendor/github.com/google/go-github/github/timestamp.go deleted file mode 100644 index 90929d5757d6..000000000000 --- a/vendor/github.com/google/go-github/github/timestamp.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "strconv" - "time" -) - -// Timestamp represents a time that can be unmarshalled from a JSON string -// formatted as either an RFC3339 or Unix timestamp. This is necessary for some -// fields since the GitHub API is inconsistent in how it represents times. All -// exported methods of time.Time can be called on Timestamp. -type Timestamp struct { - time.Time -} - -func (t Timestamp) String() string { - return t.Time.String() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Time is expected in RFC3339 or Unix format. 
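The AddTeamMembership documentation in teams_members.go above spells out the add-or-invite flow ("pending" until the invitation is accepted, then "active"); a hedged sketch of promoting a user to maintainer and then checking the resulting state might look like this. The team ID and login are placeholders, and github.NewClient lives outside this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // requires admin rights on the team when authenticated

	// Add or invite the user; if they are not yet in the organization the
	// returned membership starts out in the "pending" state.
	m, _, err := client.Teams.AddTeamMembership(ctx, 42, "octocat",
		&github.TeamAddTeamMembershipOptions{Role: "maintainer"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m)

	// GetTeamMembership reports the current state ("pending" or "active") and role.
	state, _, err := client.Teams.GetTeamMembership(ctx, 42, "octocat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state)
}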
-func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { - str := string(data) - i, err := strconv.ParseInt(str, 10, 64) - if err == nil { - t.Time = time.Unix(i, 0) - } else { - t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) - } - return -} - -// Equal reports whether t and u are equal based on time.Equal -func (t Timestamp) Equal(u Timestamp) bool { - return t.Time.Equal(u.Time) -} diff --git a/vendor/github.com/google/go-github/github/users.go b/vendor/github.com/google/go-github/github/users.go deleted file mode 100644 index 87cfa7f84b71..000000000000 --- a/vendor/github.com/google/go-github/github/users.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// UsersService handles communication with the user related -// methods of the GitHub API. -// -// GitHub API docs: https://developer.github.com/v3/users/ -type UsersService service - -// User represents a GitHub user. -type User struct { - Login *string `json:"login,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - Name *string `json:"name,omitempty"` - Company *string `json:"company,omitempty"` - Blog *string `json:"blog,omitempty"` - Location *string `json:"location,omitempty"` - Email *string `json:"email,omitempty"` - Hireable *bool `json:"hireable,omitempty"` - Bio *string `json:"bio,omitempty"` - PublicRepos *int `json:"public_repos,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` - Followers *int `json:"followers,omitempty"` - Following *int `json:"following,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - SuspendedAt *Timestamp `json:"suspended_at,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - DiskUsage *int `json:"disk_usage,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - TwoFactorAuthentication *bool `json:"two_factor_authentication,omitempty"` - Plan *Plan `json:"plan,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` - - // Permissions identifies the permissions that a user has on a given - // repository. This is only populated when calling Repositories.ListCollaborators. 
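The Timestamp.UnmarshalJSON implementation in timestamp.go above accepts either a bare Unix integer or a quoted RFC3339 string; a small self-contained sketch of that behaviour follows. The event struct and its field are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

// event is a hypothetical payload with a GitHub-style timestamp field.
type event struct {
	CreatedAt github.Timestamp `json:"created_at"`
}

func main() {
	var fromUnix, fromRFC3339 event

	// Unix seconds take the strconv.ParseInt branch of UnmarshalJSON.
	if err := json.Unmarshal([]byte(`{"created_at": 1257894000}`), &fromUnix); err != nil {
		log.Fatal(err)
	}

	// A quoted RFC3339 string falls through to time.Parse.
	if err := json.Unmarshal([]byte(`{"created_at": "2009-11-10T23:00:00Z"}`), &fromRFC3339); err != nil {
		log.Fatal(err)
	}

	// Both inputs decode to the same instant, so Timestamp.Equal reports true.
	fmt.Println(fromUnix.CreatedAt.Equal(fromRFC3339.CreatedAt))
}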
- Permissions *map[string]bool `json:"permissions,omitempty"` -} - -func (u User) String() string { - return Stringify(u) -} - -// Get fetches a user. Passing the empty string will fetch the authenticated -// user. -// -// GitHub API docs: https://developer.github.com/v3/users/#get-a-single-user -// and: https://developer.github.com/v3/users/#get-the-authenticated-user -func (s *UsersService) Get(ctx context.Context, user string) (*User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v", user) - } else { - u = "user" - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(ctx, req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, nil -} - -// GetByID fetches a user. -// -// Note: GetByID uses the undocumented GitHub API endpoint /user/:id. -func (s *UsersService) GetByID(ctx context.Context, id int64) (*User, *Response, error) { - u := fmt.Sprintf("user/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - user := new(User) - resp, err := s.client.Do(ctx, req, user) - if err != nil { - return nil, resp, err - } - - return user, resp, nil -} - -// Edit the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/#update-the-authenticated-user -func (s *UsersService) Edit(ctx context.Context, user *User) (*User, *Response, error) { - u := "user" - req, err := s.client.NewRequest("PATCH", u, user) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(ctx, req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, nil -} - -// HovercardOptions specifies optional parameters to the UsersService.GetHovercard -// method. -type HovercardOptions struct { - // SubjectType specifies the additional information to be received about the hovercard. - // Possible values are: organization, repository, issue, pull_request. (Required when using subject_id.) - SubjectType string `url:"subject_type"` - - // SubjectID specifies the ID for the SubjectType. (Required when using subject_type.) - SubjectID string `url:"subject_id"` -} - -// Hovercard represents hovercard information about a user. -type Hovercard struct { - Contexts []*UserContext `json:"contexts,omitempty"` -} - -// UserContext represents the contextual information about user. -type UserContext struct { - Message *string `json:"message,omitempty"` - Octicon *string `json:"octicon,omitempty"` -} - -// GetHovercard fetches contextual information about user. It requires authentication -// via Basic Auth or via OAuth with the repo scope. -// -// GitHub API docs: https://developer.github.com/v3/users/#get-contextual-information-about-a-user -func (s *UsersService) GetHovercard(ctx context.Context, user string, opt *HovercardOptions) (*Hovercard, *Response, error) { - u := fmt.Sprintf("users/%v/hovercard", user) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeHovercardPreview) - - hc := new(Hovercard) - resp, err := s.client.Do(ctx, req, hc) - if err != nil { - return nil, resp, err - } - - return hc, resp, nil -} - -// UserListOptions specifies optional parameters to the UsersService.ListAll -// method. 
-type UserListOptions struct { - // ID of the last user seen - Since int64 `url:"since,omitempty"` - - // Note: Pagination is powered exclusively by the Since parameter, - // ListOptions.Page has no effect. - // ListOptions.PerPage controls an undocumented GitHub API parameter. - ListOptions -} - -// ListAll lists all GitHub users. -// -// To paginate through all users, populate 'Since' with the ID of the last user. -// -// GitHub API docs: https://developer.github.com/v3/users/#get-all-users -func (s *UsersService) ListAll(ctx context.Context, opt *UserListOptions) ([]*User, *Response, error) { - u, err := addOptions("users", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// ListInvitations lists all currently-open repository invitations for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-a-users-repository-invitations -func (s *UsersService) ListInvitations(ctx context.Context, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) { - u, err := addOptions("user/repository_invitations", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - invites := []*RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, &invites) - if err != nil { - return nil, resp, err - } - - return invites, resp, nil -} - -// AcceptInvitation accepts the currently-open repository invitation for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#accept-a-repository-invitation -func (s *UsersService) AcceptInvitation(ctx context.Context, invitationID int64) (*Response, error) { - u := fmt.Sprintf("user/repository_invitations/%v", invitationID) - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeclineInvitation declines the currently-open repository invitation for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/repos/invitations/#decline-a-repository-invitation -func (s *UsersService) DeclineInvitation(ctx context.Context, invitationID int64) (*Response, error) { - u := fmt.Sprintf("user/repository_invitations/%v", invitationID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_administration.go b/vendor/github.com/google/go-github/github/users_administration.go deleted file mode 100644 index e042398d8c54..000000000000 --- a/vendor/github.com/google/go-github/github/users_administration.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance. 
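UsersService.Get above switches between the /users/:login and /user endpoints depending on whether the login is empty; a brief sketch of that convention, with the login and client construction as placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

// showUser prints a user's profile; an empty login fetches the authenticated user instead.
func showUser(ctx context.Context, client *github.Client, login string) error {
	u, _, err := client.Users.Get(ctx, login)
	if err != nil {
		return err
	}
	fmt.Println(u) // User.String() delegates to Stringify
	return nil
}

func main() {
	if err := showUser(context.Background(), github.NewClient(nil), "octocat"); err != nil {
		log.Fatal(err)
	}
}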
-// -// GitHub API docs: https://developer.github.com/v3/users/administration/#promote-an-ordinary-user-to-a-site-administrator -func (s *UsersService) PromoteSiteAdmin(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#demote-a-site-administrator-to-an-ordinary-user -func (s *UsersService) DemoteSiteAdmin(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Suspend a user on a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#suspend-a-user -func (s *UsersService) Suspend(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unsuspend a user on a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#unsuspend-a-user -func (s *UsersService) Unsuspend(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_blocking.go b/vendor/github.com/google/go-github/github/users_blocking.go deleted file mode 100644 index 39e45601cc1e..000000000000 --- a/vendor/github.com/google/go-github/github/users_blocking.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListBlockedUsers lists all the blocked users by the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/blocking/#list-blocked-users -func (s *UsersService) ListBlockedUsers(ctx context.Context, opt *ListOptions) ([]*User, *Response, error) { - u := "user/blocks" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - var blockedUsers []*User - resp, err := s.client.Do(ctx, req, &blockedUsers) - if err != nil { - return nil, resp, err - } - - return blockedUsers, resp, nil -} - -// IsBlocked reports whether specified user is blocked by the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/blocking/#check-whether-youve-blocked-a-user -func (s *UsersService) IsBlocked(ctx context.Context, user string) (bool, *Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - resp, err := s.client.Do(ctx, req, nil) - isBlocked, err := parseBoolResponse(err) - return isBlocked, resp, err -} - -// BlockUser blocks specified user for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/blocking/#block-a-user -func (s *UsersService) BlockUser(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnblockUser unblocks specified user for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/blocking/#unblock-a-user -func (s *UsersService) UnblockUser(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_emails.go b/vendor/github.com/google/go-github/github/users_emails.go deleted file mode 100644 index 0bbd4627e3b8..000000000000 --- a/vendor/github.com/google/go-github/github/users_emails.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "context" - -// UserEmail represents user's email address -type UserEmail struct { - Email *string `json:"email,omitempty"` - Primary *bool `json:"primary,omitempty"` - Verified *bool `json:"verified,omitempty"` -} - -// ListEmails lists all email addresses for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user -func (s *UsersService) ListEmails(ctx context.Context, opt *ListOptions) ([]*UserEmail, *Response, error) { - u := "user/emails" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var emails []*UserEmail - resp, err := s.client.Do(ctx, req, &emails) - if err != nil { - return nil, resp, err - } - - return emails, resp, nil -} - -// AddEmails adds email addresses of the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/emails/#add-email-addresses -func (s *UsersService) AddEmails(ctx context.Context, emails []string) ([]*UserEmail, *Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("POST", u, emails) - if err != nil { - return nil, nil, err - } - - var e []*UserEmail - resp, err := s.client.Do(ctx, req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// DeleteEmails deletes email addresses from authenticated user. 
-// -// GitHub API docs: https://developer.github.com/v3/users/emails/#delete-email-addresses -func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("DELETE", u, emails) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_followers.go b/vendor/github.com/google/go-github/github/users_followers.go deleted file mode 100644 index c2224096a623..000000000000 --- a/vendor/github.com/google/go-github/github/users_followers.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListFollowers lists the followers for a user. Passing the empty string will -// fetch followers for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/followers/#list-followers-of-a-user -func (s *UsersService) ListFollowers(ctx context.Context, user string, opt *ListOptions) ([]*User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/followers", user) - } else { - u = "user/followers" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// ListFollowing lists the people that a user is following. Passing the empty -// string will list people the authenticated user is following. -// -// GitHub API docs: https://developer.github.com/v3/users/followers/#list-users-followed-by-another-user -func (s *UsersService) ListFollowing(ctx context.Context, user string, opt *ListOptions) ([]*User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/following", user) - } else { - u = "user/following" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// IsFollowing checks if "user" is following "target". Passing the empty -// string for "user" will check if the authenticated user is following "target". -// -// GitHub API docs: https://developer.github.com/v3/users/followers/#check-if-you-are-following-a-user -func (s *UsersService) IsFollowing(ctx context.Context, user, target string) (bool, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/following/%v", user, target) - } else { - u = fmt.Sprintf("user/following/%v", target) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - following, err := parseBoolResponse(err) - return following, resp, err -} - -// Follow will cause the authenticated user to follow the specified user. 
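IsFollowing in users_followers.go above uses the same empty-string convention (an empty "user" asks about the authenticated user) and derives its boolean from the response status; a short sketch with placeholder logins:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Does "defunkt" follow "octocat"? Passing "" as the first login would ask
	// about the authenticated user instead, which needs an authenticated client.
	following, _, err := client.Users.IsFollowing(ctx, "defunkt", "octocat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(following) // true or false, derived from the 204/404 status via parseBoolResponse
}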
-// -// GitHub API docs: https://developer.github.com/v3/users/followers/#follow-a-user -func (s *UsersService) Follow(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unfollow will cause the authenticated user to unfollow the specified user. -// -// GitHub API docs: https://developer.github.com/v3/users/followers/#unfollow-a-user -func (s *UsersService) Unfollow(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys.go b/vendor/github.com/google/go-github/github/users_gpg_keys.go deleted file mode 100644 index 07ed38dcbef5..000000000000 --- a/vendor/github.com/google/go-github/github/users_gpg_keys.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// GPGKey represents a GitHub user's public GPG key used to verify GPG signed commits and tags. -// -// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/ -type GPGKey struct { - ID *int64 `json:"id,omitempty"` - PrimaryKeyID *int64 `json:"primary_key_id,omitempty"` - KeyID *string `json:"key_id,omitempty"` - PublicKey *string `json:"public_key,omitempty"` - Emails []GPGEmail `json:"emails,omitempty"` - Subkeys []GPGKey `json:"subkeys,omitempty"` - CanSign *bool `json:"can_sign,omitempty"` - CanEncryptComms *bool `json:"can_encrypt_comms,omitempty"` - CanEncryptStorage *bool `json:"can_encrypt_storage,omitempty"` - CanCertify *bool `json:"can_certify,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` -} - -// String stringifies a GPGKey. -func (k GPGKey) String() string { - return Stringify(k) -} - -// GPGEmail represents an email address associated to a GPG key. -type GPGEmail struct { - Email *string `json:"email,omitempty"` - Verified *bool `json:"verified,omitempty"` -} - -// ListGPGKeys lists the public GPG keys for a user. Passing the empty -// string will fetch keys for the authenticated user. It requires authentication -// via Basic Auth or via OAuth with at least read:gpg_key scope. -// -// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-gpg-keys-for-a-user -func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opt *ListOptions) ([]*GPGKey, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/gpg_keys", user) - } else { - u = "user/gpg_keys" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var keys []*GPGKey - resp, err := s.client.Do(ctx, req, &keys) - if err != nil { - return nil, resp, err - } - - return keys, resp, nil -} - -// GetGPGKey gets extended details for a single GPG key. It requires authentication -// via Basic Auth or via OAuth with at least read:gpg_key scope. 
-// -// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#get-a-single-gpg-key -func (s *UsersService) GetGPGKey(ctx context.Context, id int64) (*GPGKey, *Response, error) { - u := fmt.Sprintf("user/gpg_keys/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := &GPGKey{} - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// CreateGPGKey creates a GPG key. It requires authenticatation via Basic Auth -// or OAuth with at least write:gpg_key scope. -// -// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#create-a-gpg-key -func (s *UsersService) CreateGPGKey(ctx context.Context, armoredPublicKey string) (*GPGKey, *Response, error) { - gpgKey := &struct { - ArmoredPublicKey string `json:"armored_public_key"` - }{ArmoredPublicKey: armoredPublicKey} - req, err := s.client.NewRequest("POST", "user/gpg_keys", gpgKey) - if err != nil { - return nil, nil, err - } - - key := &GPGKey{} - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// DeleteGPGKey deletes a GPG key. It requires authentication via Basic Auth or -// via OAuth with at least admin:gpg_key scope. -// -// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#delete-a-gpg-key -func (s *UsersService) DeleteGPGKey(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("user/gpg_keys/%v", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_keys.go b/vendor/github.com/google/go-github/github/users_keys.go deleted file mode 100644 index ddc832a1ece7..000000000000 --- a/vendor/github.com/google/go-github/github/users_keys.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Key represents a public SSH key used to authenticate a user or deploy script. -type Key struct { - ID *int64 `json:"id,omitempty"` - Key *string `json:"key,omitempty"` - URL *string `json:"url,omitempty"` - Title *string `json:"title,omitempty"` - ReadOnly *bool `json:"read_only,omitempty"` -} - -func (k Key) String() string { - return Stringify(k) -} - -// ListKeys lists the verified public keys for a user. Passing the empty -// string will fetch keys for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user -func (s *UsersService) ListKeys(ctx context.Context, user string, opt *ListOptions) ([]*Key, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/keys", user) - } else { - u = "user/keys" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var keys []*Key - resp, err := s.client.Do(ctx, req, &keys) - if err != nil { - return nil, resp, err - } - - return keys, resp, nil -} - -// GetKey fetches a single public key. 
-// -// GitHub API docs: https://developer.github.com/v3/users/keys/#get-a-single-public-key -func (s *UsersService) GetKey(ctx context.Context, id int64) (*Key, *Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// CreateKey adds a public key for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/users/keys/#create-a-public-key -func (s *UsersService) CreateKey(ctx context.Context, key *Key) (*Key, *Response, error) { - u := "user/keys" - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(ctx, req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteKey deletes a public key. -// -// GitHub API docs: https://developer.github.com/v3/users/keys/#delete-a-public-key -func (s *UsersService) DeleteKey(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/github/with_appengine.go b/vendor/github.com/google/go-github/github/with_appengine.go deleted file mode 100644 index 59ce26b2ea37..000000000000 --- a/vendor/github.com/google/go-github/github/with_appengine.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// This file provides glue for making github work on App Engine. - -package github - -import ( - "context" - "net/http" -) - -func withContext(ctx context.Context, req *http.Request) *http.Request { - // No-op because App Engine adds context to a request differently. - return req -} diff --git a/vendor/github.com/google/go-github/github/without_appengine.go b/vendor/github.com/google/go-github/github/without_appengine.go deleted file mode 100644 index 6f8fdac56030..000000000000 --- a/vendor/github.com/google/go-github/github/without_appengine.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// This file provides glue for making github work without App Engine. - -package github - -import ( - "context" - "net/http" -) - -func withContext(ctx context.Context, req *http.Request) *http.Request { - return req.WithContext(ctx) -} diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE deleted file mode 100644 index ae121a1e46df..000000000000 --- a/vendor/github.com/google/go-querystring/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Google. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go deleted file mode 100644 index 37080b19b5d9..000000000000 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package query implements encoding of structs into URL query parameters. -// -// As a simple example: -// -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } -// -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" -// -// The exact mapping between Go values and url.Values is described in the -// documentation for the Values() function. -package query - -import ( - "bytes" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var timeType = reflect.TypeOf(time.Time{}) - -var encoderType = reflect.TypeOf(new(Encoder)).Elem() - -// Encoder is an interface implemented by any type that wishes to encode -// itself into URL values in a non-standard way. -type Encoder interface { - EncodeValues(key string, v *url.Values) error -} - -// Values returns the url.Values encoding of v. -// -// Values expects to be passed a struct, and traverses it recursively using the -// following encoding rules. -// -// Each exported struct field is encoded as a URL parameter unless -// -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option -// -// The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any time.Time that returns true -// for IsZero(). -// -// The URL parameter name defaults to the struct field name but can be -// specified in the struct field's tag value. The "url" key in the struct -// field's tag value is the key name, followed by an optional comma and -// options. For example: -// -// // Field is ignored by this package. -// Field int `url:"-"` -// -// // Field appears as URL parameter "myName". 
-// Field int `url:"myName"` -// -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` -// -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. -// Field int `url:",omitempty"` -// -// For encoding individual field values, the following type-dependent rules -// apply: -// -// Boolean values default to encoding as the strings "true" or "false". -// Including the "int" option signals that the field should be encoded as the -// strings "1" or "0". -// -// time.Time values default to encoding as RFC3339 timestamps. Including the -// "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()) -// -// Slice and Array values default to encoding as multiple URL values of the -// same name. Including the "comma" option signals that the field should be -// encoded as a single comma-delimited value. Including the "space" option -// similarly encodes the value as a single space-delimited string. Including -// the "semicolon" option will encode the value as a semicolon-delimited string. -// Including the "brackets" option signals that the multiple URL values should -// have "[]" appended to the value name. "numbered" will append a number to -// the end of each incidence of the value name, example: -// name0=value0&name1=value1, etc. -// -// Anonymous struct fields are usually encoded as if their inner exported -// fields were fields in the outer struct, subject to the standard Go -// visibility rules. An anonymous struct field with a name given in its URL -// tag is treated as having that name, rather than being anonymous. -// -// Non-nil pointer values are encoded as the value pointed to. -// -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: -// -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" -// -// All other values are encoded using their default string representation. -// -// Multiple fields that encode to the same URL parameter name will be included -// as multiple URL values of the same name. -func Values(v interface{}) (url.Values, error) { - values := make(url.Values) - val := reflect.ValueOf(v) - for val.Kind() == reflect.Ptr { - if val.IsNil() { - return values, nil - } - val = val.Elem() - } - - if v == nil { - return values, nil - } - - if val.Kind() != reflect.Struct { - return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) - } - - err := reflectValue(values, val, "") - return values, err -} - -// reflectValue populates the values parameter from the struct fields in val. -// Embedded structs are followed recursively (using the rules defined in the -// Values function documentation) breadth-first. 
-func reflectValue(values url.Values, val reflect.Value, scope string) error { - var embedded []reflect.Value - - typ := val.Type() - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - - sv := val.Field(i) - tag := sf.Tag.Get("url") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if name == "" { - if sf.Anonymous && sv.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, sv) - continue - } - - name = sf.Name - } - - if scope != "" { - name = scope + "[" + name + "]" - } - - if opts.Contains("omitempty") && isEmptyValue(sv) { - continue - } - - if sv.Type().Implements(encoderType) { - if !reflect.Indirect(sv).IsValid() { - sv = reflect.New(sv.Type().Elem()) - } - - m := sv.Interface().(Encoder) - if err := m.EncodeValues(name, &values); err != nil { - return err - } - continue - } - - if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del byte - if opts.Contains("comma") { - del = ',' - } else if opts.Contains("space") { - del = ' ' - } else if opts.Contains("semicolon") { - del = ';' - } else if opts.Contains("brackets") { - name = name + "[]" - } - - if del != 0 { - s := new(bytes.Buffer) - first := true - for i := 0; i < sv.Len(); i++ { - if first { - first = false - } else { - s.WriteByte(del) - } - s.WriteString(valueString(sv.Index(i), opts)) - } - values.Add(name, s.String()) - } else { - for i := 0; i < sv.Len(); i++ { - k := name - if opts.Contains("numbered") { - k = fmt.Sprintf("%s%d", name, i) - } - values.Add(k, valueString(sv.Index(i), opts)) - } - } - continue - } - - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts)) - continue - } - - if sv.Kind() == reflect.Struct { - reflectValue(values, sv, name) - continue - } - - values.Add(name, valueString(sv, opts)) - } - - for _, f := range embedded { - if err := reflectValue(values, f, scope); err != nil { - return err - } - } - - return nil -} - -// valueString returns the string representation of a value. -func valueString(v reflect.Value, opts tagOptions) string { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return "" - } - v = v.Elem() - } - - if v.Kind() == reflect.Bool && opts.Contains("int") { - if v.Bool() { - return "1" - } - return "0" - } - - if v.Type() == timeType { - t := v.Interface().(time.Time) - if opts.Contains("unix") { - return strconv.FormatInt(t.Unix(), 10) - } - return t.Format(time.RFC3339) - } - - return fmt.Sprint(v.Interface()) -} - -// isEmptyValue checks if a value should be considered empty for the purposes -// of omitting fields with the "omitempty" option. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - if v.Type() == timeType { - return v.Interface().(time.Time).IsZero() - } - - return false -} - -// tagOptions is the string following a comma in a struct field's "url" tag, or -// the empty string. It does not include the leading comma. 
-type tagOptions []string - -// parseTag splits a struct field's url tag into its name and comma-separated -// options. -func parseTag(tag string) (string, tagOptions) { - s := strings.Split(tag, ",") - return s[0], s[1:] -} - -// Contains checks whether the tagOptions contains the specified option. -func (o tagOptions) Contains(option string) bool { - for _, s := range o { - if s == option { - return true - } - } - return false -} diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
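The encoding rules for the removed go-querystring encoder are spelled out in its doc comment above (tag renaming, omitempty, the "comma" delimiter for slices, the "int" option for booleans). A small sketch of how those options compose in practice; ListOptions and its fields are made up for illustration, while query.Values is the entry point shown in the deleted source.

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-querystring/query"
    )

    // ListOptions is a hypothetical options struct exercising the tag
    // options implemented by the removed encoder.
    type ListOptions struct {
        State   string   `url:"state,omitempty"`
        Labels  []string `url:"labels,comma"`
        Page    int      `url:"page,omitempty"`
        Verbose bool     `url:"verbose,int"` // encoded as "1"/"0" rather than "true"/"false"
    }

    func main() {
        v, err := query.Values(ListOptions{
            State:   "open",
            Labels:  []string{"bug", "vendor"},
            Verbose: true,
        })
        if err != nil {
            log.Fatal(err)
        }
        // Page is zero and tagged omitempty, so it is dropped:
        // labels=bug%2Cvendor&state=open&verbose=1
        fmt.Println(v.Encode())
    }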
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d4a64f..000000000000 --- a/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 4f888fbc8fa5..000000000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,453 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "time" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. 
-func New() *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - nilChance: .2, - minElements: 1, - maxElements: 10, - } - return f -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. -func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. 
If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// Not safe for cyclic or tree-like structs! -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.doFuzz(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.doFuzz(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && f.tryCustom(v.Addr()) { - return - } - if f.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, f.r) - return - } - switch v.Kind() { - case reflect.Map: - if f.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := f.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - f.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - f.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if f.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - f.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if f.genShouldFill() { - n := f.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if f.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - f.doFuzz(v.Field(i), 0) - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (f *Fuzzer) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := f.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{f: f, Rand: f.r}) - return true - } - } - // Finally: see if there is a default fuzz function. 
- doCustom, ok = f.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - f: f, - Rand: f.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - f *Fuzzer - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.f.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.f.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. 
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false -} - -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b1270ff0c..000000000000 --- a/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
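The gofuzz API being un-vendored here is driven entirely through the chainable methods documented in the deleted source (RandSource, NilChance, NumElements, Funcs, Fuzz). A short usage sketch follows; MyConfig, the seed value, and the custom string function are illustrative only.

    package main

    import (
        "fmt"
        "math/rand"

        fuzz "github.com/google/gofuzz"
    )

    // MyConfig is a throwaway struct used only to demonstrate the API.
    type MyConfig struct {
        Name    string
        Retries int
        Tags    []string
    }

    func main() {
        f := fuzz.New().
            RandSource(rand.NewSource(42)). // deterministic output for a fixed seed
            NilChance(0).                   // never produce nil maps/slices/pointers
            NumElements(1, 3).              // non-nil maps/slices get 1..3 elements
            Funcs(
                // Custom fuzzing for strings; Continue exposes the fuzzer's
                // randomness so results stay reproducible for the seed above.
                func(s *string, c fuzz.Continue) {
                    *s = "name-" + c.RandString()
                },
            )

        var cfg MyConfig
        f.Fuzz(&cfg) // must pass a pointer; only exported fields are filled
        fmt.Printf("%+v\n", cfg)
    }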
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go deleted file mode 100644 index 0e32451a320c..000000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ /dev/null @@ -1,8728 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := in.(bool) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in interface{}, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes, _ := yaml.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// 
NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if 
possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := v14.([]interface{}) - if ok { - for _, item := range a { - y, err := NewTag(item, compiler.NewContext("tags", context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - 
return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. -func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { - errors := make([]error, 0) - x := &FormDataParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [formData] - if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array file] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = 
append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - 
x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. -func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 
!= nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = 
int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 18; - v18 := compiler.MapValueForKey(m, "description") - if v18 != nil { - x.Description, ok = v18.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 19; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { - errors := make([]error, 0) - x := &HeaderParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header] - if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, 
"collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - 
if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaders creates an object of type Headers if possible, returning an error if not. 
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { - errors := make([]error, 0) - x := &Headers{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedHeader additional_properties = 1; - // MAP: Header - x.AdditionalProperties = make([]*NamedHeader, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedHeader{} - pair.Name = k - var err error - pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewInfo creates an object of type Info if possible, returning an error if not. -func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { - errors := make([]error, 0) - x := &Info{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"title", "version"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string title = 1; - v1 := compiler.MapValueForKey(m, "title") - if v1 != nil { - x.Title, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string version = 2; - v2 := compiler.MapValueForKey(m, "version") - if v2 != nil { - x.Version, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string terms_of_service = 4; - v4 := compiler.MapValueForKey(m, "termsOfService") - if v4 != nil { - x.TermsOfService, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Contact contact = 5; - v5 := compiler.MapValueForKey(m, "contact") - if v5 != nil { - var err error - x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) - if err != nil { - errors = append(errors, err) - } - } - // License license = 6; - v6 := compiler.MapValueForKey(m, "license") - if v6 != nil { - var err error - x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { - errors := make([]error, 0) - x := &ItemsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Schema = make([]*Schema, 0) - y, err := NewSchema(m, compiler.NewContext("", context)) - if err != nil { - return nil, err - } - x.Schema = append(x.Schema, y) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. -func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { - errors := make([]error, 0) - x := &JsonReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"$ref"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"$ref", "description"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLicense creates an object of type License if possible, returning an error if not. 
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) { - errors := make([]error, 0) - x := &License{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { - errors := make([]error, 0) - x := &NamedAny{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewAny(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. -func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { - errors := make([]error, 0) - x := &NamedHeader{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Header value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { - errors := make([]error, 0) - x := &NamedParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Parameter value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { - errors := make([]error, 0) - x := &NamedPathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PathItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { - errors := make([]error, 0) - x := &NamedResponse{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Response value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. -func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { - errors := make([]error, 0) - x := &NamedResponseValue{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ResponseValue value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { - errors := make([]error, 0) - x := &NamedSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. -func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &NamedSecurityDefinitionsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SecurityDefinitionsItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { - errors := make([]error, 0) - x := &NamedString{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - x.Value, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. -func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { - errors := make([]error, 0) - x := &NamedStringArray{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // StringArray value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { - errors := make([]error, 0) - x := &NonBodyParameter{} - matched := false - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // HeaderParameterSubSchema header_parameter_sub_schema = 1; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // QueryParameterSubSchema query_parameter_sub_schema = 3; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // PathParameterSubSchema path_parameter_sub_schema = 4; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { - errors := make([]error, 0) - x := &Oauth2AccessCodeSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [accessCode] - if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string token_url = 5; - v5 := compiler.MapValueForKey(m, "tokenUrl") - if v5 != nil { - x.TokenUrl, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 6; - v6 := compiler.MapValueForKey(m, "description") - if v6 != nil { - x.Description, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := 
compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. -func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ApplicationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [application] - if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string 
description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. -func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ImplicitSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [implicit] - if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. -func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { - errors := make([]error, 0) - x := &Oauth2PasswordSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [password] - if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { - errors := make([]error, 0) - x := &Oauth2Scopes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedString{} - pair.Name = k - pair.Value = v.(string) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. 
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := v1.([]interface{}) - if ok { - x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = 
make([]*ParametersItem, 0) - a, ok := v8.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := v10.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. 
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - matched := false - // BodyParameter body_parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_BodyParameter{BodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // NonBodyParameter non_body_parameter = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { - errors := make([]error, 0) - x := &ParameterDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameter additional_properties = 1; - // MAP: Parameter - x.AdditionalProperties = make([]*NamedParameter, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedParameter{} - pair.Name = k - var err error - pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error 
- x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := v9.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. -func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = 
v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = 
float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. -func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - 
if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - 
x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - 
} - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; 
- v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { - errors := make([]error, 0) - x := &ResponseDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponse additional_properties = 1; - // MAP: Response - x.AdditionalProperties = make([]*NamedResponse, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedResponse{} - pair.Name = k - var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { - errors := make([]error, 0) - x := &ResponseValue{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool 
exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := v12.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = v13.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = v16.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := v19.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = v26.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = v27.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) - 
if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := 
range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - switch in := in.(type) { - case string: - x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: - x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. -func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. 
-func (m *Any) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
-func (m *Paths) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseDefinitions objects. 
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseValue objects. -func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ResponseValue_Response) - if ok { - _, err := p.Response.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ResponseValue_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewResponseValue(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Responses objects. -func (m *Responses) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.ResponseCode { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Schema objects. -func (m *Schema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewSchema(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.AdditionalProperties != nil { - _, err := m.AdditionalProperties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Type != nil { - _, err := m.Type.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.AllOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Properties != nil { - _, err := m.Properties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Xml != nil { - _, err := m.Xml.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = 
append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SchemaItem objects. -func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SchemaItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SchemaItem_FileSchema) - if ok { - _, err := p.FileSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitions objects. -func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. -func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) - if ok { - _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) - if ok { - _, err := p.ApiKeySecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) - if ok { - _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) - if ok { - _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) - if ok { - _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) - if ok { - _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityRequirement objects. -func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside StringArray objects. -func (m *StringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Tag objects. 
-func (m *Tag) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside TypeItem objects. -func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside VendorExtension objects. -func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Xml objects. -func (m *Xml) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. -func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // AdditionalPropertiesItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return v1.Boolean - } - return nil -} - -// ToRawInfo returns a description of Any suitable for JSON or YAML export. -func (m *Any) ToRawInfo() interface{} { - var err error - var info1 []yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info1) - if err == nil { - return info1 - } - var info2 yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info2) - if err == nil { - return info2 - } - var info3 interface{} - err = yaml.Unmarshal([]byte(m.Yaml), &info3) - if err == nil { - return info3 - } - return nil -} - -// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. -func (m *ApiKeySecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
-func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Contact suitable for JSON or YAML export. -func (m *Contact) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.Email != "" { - info = append(info, yaml.MapItem{"email", m.Email}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. -func (m *Definitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
-func (m *Document) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Swagger != "" { - info = append(info, yaml.MapItem{"swagger", m.Swagger}) - } - if m.Info != nil { - info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) - } - // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Host != "" { - info = append(info, yaml.MapItem{"host", m.Host}) - } - if m.BasePath != "" { - info = append(info, yaml.MapItem{"basePath", m.BasePath}) - } - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) - } - if m.Paths != nil { - info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) - } - // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Definitions != nil { - info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) - } - // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Parameters != nil { - info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) - } - // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"security", items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) - } - // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Tags) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Tags { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"tags", items}) - } - // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
-func (m *Examples) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. -func (m *FileSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
-func (m *Header) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Headers suitable for JSON or YAML export. -func (m *Headers) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Info suitable for JSON or YAML export. 
-func (m *Info) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Version != "" { - info = append(info, yaml.MapItem{"version", m.Version}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.TermsOfService != "" { - info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) - } - if m.Contact != nil { - info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) - } - // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.License != nil { - info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) - } - // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. -func (m *ItemsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Schema) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"schema", items}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - return info -} - -// ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. -func (m *NamedAny) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. -func (m *NamedHeader) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
-func (m *NamedParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. -func (m *NamedPathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. -func (m *NamedResponse) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. -func (m *NamedResponseValue) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. -func (m *NamedSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. -func (m *NamedString) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Value != "" { - info = append(info, yaml.MapItem{"value", m.Value}) - } - return info -} - -// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
-func (m *NonBodyParameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // NonBodyParameter - // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetHeaderParameterSubSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFormDataParameterSubSchema() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetQueryParameterSubSchema() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetPathParameterSubSchema() - if v3 != nil { - return v3.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) - } - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. 
-func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. -func (m *Oauth2Scopes) ToRawInfo() interface{} { - info := yaml.MapSlice{} - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
-func (m *Operation) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{"tags", m.Tags}) - } - if m.Summary != "" { - info = append(info, yaml.MapItem{"summary", m.Summary}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.OperationId != "" { - info = append(info, yaml.MapItem{"operationId", m.OperationId}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) - } - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"parameters", items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) - } - if m.Deprecated != false { - info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) - } - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"security", items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // Parameter - // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBodyParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetNonBodyParameter() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
-func (m *ParametersItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ParametersItem - // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Get != nil { - info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) - } - // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Put != nil { - info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) - } - // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Post != nil { - info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) - } - // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Delete != nil { - info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) - } - // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Options != nil { - info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) - } - // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Head != nil { - info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) - } - // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Patch != nil { - info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) - } - // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"parameters", items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
-func (m *PathParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - if m.Path != nil { - for _, item := range m.Path { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
-func (m *PrimitivesItems) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
-func (m *QueryParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
-func (m *Response) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Headers != nil { - info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) - } - // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Examples != nil { - info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) - } - // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. -func (m *ResponseDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
-func (m *Schema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) - } - if m.MinProperties != 0 { - info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) - } - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) - } - // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Type != nil { - if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) - } else { - info = append(info, yaml.MapItem{"type", m.Type.Value}) - } - } - // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Items != nil { - items := make([]interface{}, 0) - for _, item := range m.Items.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"items", items[0]}) - } - // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.AllOf) != 0 { - items := make([]interface{}, 0) - for _, item := range m.AllOf { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"allOf", items}) - } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.Properties != nil { - info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) - } 
- // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Discriminator != "" { - info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) - } - if m.Xml != nil { - info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) - } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() interface{} { - return m.Value -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Value) != 0 { - info = append(info, yaml.MapItem{"value", m.Value}) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
-func (m *VendorExtension) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Namespace != "" { - info = append(info, yaml.MapItem{"namespace", m.Namespace}) - } - if m.Prefix != "" { - info = append(info, yaml.MapItem{"prefix", m.Prefix}) - } - if m.Attribute != false { - info = append(info, yaml.MapItem{"attribute", m.Attribute}) - } - if m.Wrapped != false { - info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go deleted file mode 100644 index 37da7df256f9..000000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,4456 +0,0 @@ -// Code generated by protoc-gen-go. -// source: OpenAPIv2/OpenAPIv2.proto -// DO NOT EDIT! - -/* -Package openapi_v2 is a generated protocol buffer package. - -It is generated from these files: - OpenAPIv2/OpenAPIv2.proto - -It has these top-level messages: - AdditionalPropertiesItem - Any - ApiKeySecurity - BasicAuthenticationSecurity - BodyParameter - Contact - Default - Definitions - Document - Examples - ExternalDocs - FileSchema - FormDataParameterSubSchema - Header - HeaderParameterSubSchema - Headers - Info - ItemsItem - JsonReference - License - NamedAny - NamedHeader - NamedParameter - NamedPathItem - NamedResponse - NamedResponseValue - NamedSchema - NamedSecurityDefinitionsItem - NamedString - NamedStringArray - NonBodyParameter - Oauth2AccessCodeSecurity - Oauth2ApplicationSecurity - Oauth2ImplicitSecurity - Oauth2PasswordSecurity - Oauth2Scopes - Operation - Parameter - ParameterDefinitions - ParametersItem - PathItem - PathParameterSubSchema - Paths - PrimitivesItems - Properties - QueryParameterSubSchema - Response - ResponseDefinitions - ResponseValue - Responses - Schema - SchemaItem - SecurityDefinitions - SecurityDefinitionsItem - SecurityRequirement - StringArray - Tag - TypeItem - VendorExtension - Xml -*/ -package openapi_v2 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AdditionalPropertiesItem struct { - // Types that are valid to be assigned to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } -} - -func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *AdditionalPropertiesItem_Boolean: - t := uint64(0) - if x.Boolean { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AdditionalPropertiesItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &AdditionalPropertiesItem_Schema{msg} - return true, err - case 2: // oneof.boolean - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} - return true, err - default: - return false, nil - } -} - -func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: 
- s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *AdditionalPropertiesItem_Boolean: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Any struct { - Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Any) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -func (m *Any) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ApiKeySecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ApiKeySecurity) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ApiKeySecurity) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *ApiKeySecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *BasicAuthenticationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BodyParameter struct { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *BodyParameter) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BodyParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *BodyParameter) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *BodyParameter) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *BodyParameter) GetSchema() *Schema { - if m != nil { - return m.Schema - } - return nil -} - -func (m *BodyParameter) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *Contact) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Contact) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *Contact) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Contact) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *Default) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. 
-type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *Definitions) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Document struct { - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` - // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *Document) GetSwagger() string { - if m != nil { - return m.Swagger - } - return "" -} - -func (m *Document) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *Document) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *Document) GetBasePath() string { - if m != nil { - return m.BasePath - } - return "" -} - -func (m *Document) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Document) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Document) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Document) GetPaths() *Paths { - if m != nil { - return m.Paths - } - return nil -} - -func (m *Document) GetDefinitions() *Definitions { - if m != nil { - return m.Definitions - } 
- return nil -} - -func (m *Document) GetParameters() *ParameterDefinitions { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Document) GetResponses() *ResponseDefinitions { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Document) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { - if m != nil { - return m.SecurityDefinitions - } - return nil -} - -func (m *Document) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Document) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Document) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Examples struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *Examples) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -func (m *ExternalDocs) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ExternalDocs) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *ExternalDocs) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *FileSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FileSchema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *FileSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FileSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FileSchema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *FileSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FileSchema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -func (m *FileSchema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *FileSchema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *FileSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} -func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 
-} - -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Header struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *Header) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Header) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Header) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Header) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} -func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - 
return m.Minimum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Headers struct { - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The terms of service for the API. 
- TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *Info) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Info) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Info) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService - } - return "" -} - -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact - } - return nil -} - -func (m *Info) GetLicense() *License { - if m != nil { - return m.License - } - return nil -} - -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` -} - -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema - } - return nil -} - -type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` -} - -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -type License struct { - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *License) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *License) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -type NamedResponseValue struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -func (m *NamedString) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -type NamedStringArray struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value - } - return nil -} - -type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` -} -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` -} -type 
NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` -} -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } -} - -func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_FormDataParameterSubSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_QueryParameterSubSchema: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_PathParameterSubSchema: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*NonBodyParameter) - switch tag { - case 1: // oneof.header_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(HeaderParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} - return true, err - case 2: // oneof.form_data_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FormDataParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} - return true, err - case 3: // oneof.query_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(QueryParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} - return true, err - case 4: // oneof.path_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PathParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} - return true, err - default: - return false, nil - } -} - -func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - s := proto.Size(x.HeaderParameterSubSchema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_FormDataParameterSubSchema: - s := proto.Size(x.FormDataParameterSubSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_QueryParameterSubSchema: - s := proto.Size(x.QueryParameterSubSchema) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*NonBodyParameter_PathParameterSubSchema: - s := proto.Size(x.PathParameterSubSchema) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } - -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } - -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} 
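
The NonBodyParameter message above carries exactly one of the four parameter sub-schemas in its oneof, and consumers are expected to go through the generated typed getters, each of which returns nil unless its branch is set. A minimal sketch of that dispatch, written as if it sat in the same package as these generated types (the package placement and the helper name are assumptions for illustration, not part of this file):

    // parameterLocation reports which branch of the oneof is populated,
    // using the nil-safe getters generated above.
    func parameterLocation(p *NonBodyParameter) string {
        switch {
        case p.GetHeaderParameterSubSchema() != nil:
            return "header"
        case p.GetFormDataParameterSubSchema() != nil:
            return "formData"
        case p.GetQueryParameterSubSchema() != nil:
            return "query"
        case p.GetPathParameterSubSchema() != nil:
            return "path"
        default:
            return "unset"
        }
    }

    // Populating the oneof goes through the generated wrapper structs, e.g.:
    //   q := &NonBodyParameter{Oneof: &NonBodyParameter_QueryParameterSubSchema{
    //       QueryParameterSubSchema: &QueryParameterSubSchema{Name: "limit", In: "query", Type: "integer"},
    //   }}
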
- -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } - -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } - -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId - } - return "" -} - -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Operation) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Operation) GetDeprecated() bool { - if m != nil { - 
return m.Deprecated - } - return false -} - -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Parameter struct { - // Types that are valid to be assigned to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` -} -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } -} - -func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BodyParameter); err != nil { - return err - } - case *Parameter_NonBodyParameter: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.NonBodyParameter); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Parameter) - switch tag { - case 1: // oneof.body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_BodyParameter{msg} - return true, err - case 2: // oneof.non_body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NonBodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_NonBodyParameter{msg} - return true, err - default: - return false, nil - } -} - -func _Parameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - s := proto.Size(x.BodyParameter) - n += proto.SizeVarint(1<<3 | 
proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Parameter_NonBodyParameter: - s := proto.Size(x.NonBodyParameter) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } - -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - // Types that are valid to be assigned to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` -} -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } -} - -func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Parameter); err != nil { - return err - } - case *ParametersItem_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ParametersItem) - switch tag { - case 1: // oneof.parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Parameter) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_Parameter{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ParametersItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - s := proto.Size(x.Parameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ParametersItem_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` - // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } - -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get - } - return nil -} - -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put - } - return nil -} - -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post - } - return nil -} - -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete - } - return nil -} - -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options - } - return nil -} - -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head - } - return nil -} - -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch - } - return nil -} - -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type PathParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - 
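
PathItem, defined earlier in this file, models each HTTP method as its own optional *Operation field, so gathering the operations actually defined on a path is a nil check per generated getter. A short sketch under the same assumption as above (written as if inside this generated package; the function name is illustrative only):

    // operationsByMethod collects the non-nil operations of a path item,
    // keyed by lower-case HTTP method name.
    func operationsByMethod(item *PathItem) map[string]*Operation {
        ops := map[string]*Operation{
            "get":     item.GetGet(),
            "put":     item.GetPut(),
            "post":    item.GetPost(),
            "delete":  item.GetDelete(),
            "options": item.GetOptions(),
            "head":    item.GetHead(),
            "patch":   item.GetPatch(),
        }
        for method, op := range ops {
            if op == nil {
                delete(ops, method) // deleting during range is safe in Go
            }
        }
        return ops
    }
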
-func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` -} - -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } - -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path - } - return nil -} - -type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } - -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } - -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - 
-func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Response struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } - -func (m *Response) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema - } - return nil -} - -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers - } - return nil -} - -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples - } - return nil -} - -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// One or more JSON representations for parameters -type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } - -func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - // Types that are valid to be assigned to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` -} -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } -} - -func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Response); err != nil { - return err - } - case *ResponseValue_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) - } - return nil -} - -func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseValue) - switch tag { - case 1: // oneof.response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Response) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_Response{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - s := proto.Size(x.Response) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseValue_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Response objects names can either be any valid HTTP status code or 'default'. -type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } - -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode - } - return nil -} - -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` - Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } - -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties - } - return 0 -} - -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties - } - return 0 -} - -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type - } - return nil -} - -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items - } - return nil -} - -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf - } - return nil -} - -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties - } - return nil -} - -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator - } - return "" -} - -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - 
-func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml - } - return nil -} - -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type SchemaItem struct { - // Types that are valid to be assigned to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } -} - -func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *SchemaItem_FileSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FileSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SchemaItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_Schema{msg} - return true, err - case 2: // oneof.file_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FileSchema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_FileSchema{msg} - return true, err - default: - return false, nil - } -} - -func _SchemaItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*SchemaItem_FileSchema: - s := proto.Size(x.FileSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } - -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` -} -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} -func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } -} - -func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_ApiKeySecurity: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - b.EncodeVarint(6<<3 | 
proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SecurityDefinitionsItem) - switch tag { - case 1: // oneof.basic_authentication_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BasicAuthenticationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} - return true, err - case 2: // oneof.api_key_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ApiKeySecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} - return true, err - case 3: // oneof.oauth2_implicit_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ImplicitSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} - return true, err - case 4: // oneof.oauth2_password_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2PasswordSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} - return true, err - case 5: // oneof.oauth2_application_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ApplicationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} - return true, err - case 6: // oneof.oauth2_access_code_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2AccessCodeSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} - return true, err - default: - return false, nil - } -} - -func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - s := proto.Size(x.BasicAuthenticationSecurity) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_ApiKeySecurity: - s := proto.Size(x.ApiKeySecurity) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - s := proto.Size(x.Oauth2ImplicitSecurity) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - s := proto.Size(x.Oauth2PasswordSecurity) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - s := proto.Size(x.Oauth2ApplicationSecurity) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - s := proto.Size(x.Oauth2AccessCodeSecurity) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityRequirement struct { - 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } - -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } - -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } - -func (m *Tag) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } - -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -// Any property starting with x- is valid. 
-type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } - -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } - -func (m *Xml) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute - } - return false -} - -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, - 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, - 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, - 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, - 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, - 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, - 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, - 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, - 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, - 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, - 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, - 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, - 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, - 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, - 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, - 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, - 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, - 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, - 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, - 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, - 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, - 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, - 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, - 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, - 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, - 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, - 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, - 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, - 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, - 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, - 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, - 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, - 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, - 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, - 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, - 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, - 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, - 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, - 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, - 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, - 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, - 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, - 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, - 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, - 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, - 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, - 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, - 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, - 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, - 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, - 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, - 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, - 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, - 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, - 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, - 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, - 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, - 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, - 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, - 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, - 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, - 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, - 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, - 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, - 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, - 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, - 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, - 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, - 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, - 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, - 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, - 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, - 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, - 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, - 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, - 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, - 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, - 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, - 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, - 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, - 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, - 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, - 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, - 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, - 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, - 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, - 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, - 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, - 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, - 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, - 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, - 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, - 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, - 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, - 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, - 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, - 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, - 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, - 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, - 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, - 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, - 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, - 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, - 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, - 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, - 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, - 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, - 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, - 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, - 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, - 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, - 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, - 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, - 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, - 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, - 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, - 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, - 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, - 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, - 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, - 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, - 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, - 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, - 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, - 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, - 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, - 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, - 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, - 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, - 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, - 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, - 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, - 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, - 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, - 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, - 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, - 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, - 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, - 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, - 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, - 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, - 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, - 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, - 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, - 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, - 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, - 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, - 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, - 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, - 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, - 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, - 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, - 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, - 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, - 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, - 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, - 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, - 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, - 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, - 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, - 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, - 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, - 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, - 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, - 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, - 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, - 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, - 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, - 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, - 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, - 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, - 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, - 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, - 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, - 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, - 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, - 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, - 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, - 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, - 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, - 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, - 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, - 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, - 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, - 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, - 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, - 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, - 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, - 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, - 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, - 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, - 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, - 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, - 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, - 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, - 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index a64c1b75db40..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - if context.Parent != nil { - return context.Parent.Description() + "." + context.Name - } - return context.Name -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index d8672c1008b7..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return "ERROR " + err.Message - } - return "ERROR " + err.Context.Description() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. -type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b650e817..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. 
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index 76df635ff9bf..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "regexp" - "sort" - "strconv" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true - } - return nil, false -} - -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
-func SortedKeysForMap(m yaml.MapSlice) []string { - keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yaml.MapSlice contains a specified key. -func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - } - return invalidKeys -} - -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description - } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description - } - description += fmt.Sprintf("%s%+v\n", indent, in) - return description -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
-func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index 9713a21cca26..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index 604a46a6a1e7..000000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" -) - -var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 - -var verboseReader = false - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]interface{}, 0) - } -} - -// FetchFile gets a specified file from the local filesystem or a remote location. -func FetchFile(fileurl string) ([]byte, error) { - initializeFileCache() - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - log.Printf("Fetching %s", fileurl) - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - bytes, err = ioutil.ReadAll(response.Body) - if err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - // is the filename a url? 
- fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := FetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { - initializeInfoCache() - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - var info yaml.MapSlice - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - infoCache[filename] = info - return info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. -func ReadInfoForRef(basefile string, ref string) (interface{}, error) { - initializeInfoCache() - { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - count = count + 1 - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = basedir + parts[0] - } else { - filename = basefile - } - bytes, err := ReadBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := ReadInfoFromBytes(filename, bytes) - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m, ok := info.(yaml.MapSlice) - if ok { - found := false - for _, section := range m { - if section.Key == key { - info = section.Value - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - infoCache[ref] = info - return info, nil -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index b14f1f945fc1..000000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,219 +0,0 @@ -// Code generated by protoc-gen-go. -// source: extension.proto -// DO NOT EDIT! - -/* -Package openapiextension_v1 is a generated protocol buffer package. - -It is generated from these files: - extension.proto - -It has these top-level messages: - Version - ExtensionHandlerRequest - ExtensionHandlerResponse - Wrapper -*/ -package openapiextension_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of OpenAPI compiler. 
-type Version struct { - Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` - // The version number of openapi compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` -} - -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper - } - return nil -} - -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. 
- Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` - // text output - Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` -} - -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled - } - return false -} - -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error - } - return nil -} - -func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -type Wrapper struct { - // version of the OpenAPI specification in which this extension was written. - Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` - // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName - } - return "" -} - -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, - 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, - 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, - 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, - 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, - 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, - 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, - 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, - 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, - 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, - 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, - 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, - 0x6c, 0x3c, 0xd4, 
0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, - 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, - 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, - 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, - 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, - 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, - 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, - 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, - 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, - 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, - 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index 94a8e62a776e..000000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapiextension_v1 - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type documentHandler func(version string, extensionName string, document string) -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -func forInputYamlFromOpenapic(handler documentHandler) { - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - fmt.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - fmt.Println("Input error:", err.Error()) - os.Exit(1) - } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb0cbd..000000000000 --- a/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129d8237..000000000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index f6a2ec4a53e8..000000000000 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. 
-func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. -type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. -func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. 
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
-func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. - minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. 
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. -func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE deleted file mode 100644 index c33dcc7c928c..000000000000 --- a/vendor/github.com/hashicorp/errwrap/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. 
“Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. 
for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go deleted file mode 100644 index a733bef18c05..000000000000 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package errwrap implements methods to formalize error wrapping in Go. -// -// All of the top-level functions that take an `error` are built to be able -// to take any error, not just wrapped errors. This allows you to use errwrap -// without having to type-check and type-cast everywhere. -package errwrap - -import ( - "errors" - "reflect" - "strings" -) - -// WalkFunc is the callback called for Walk. -type WalkFunc func(error) - -// Wrapper is an interface that can be implemented by custom types to -// have all the Contains, Get, etc. functions in errwrap work. -// -// When Walk reaches a Wrapper, it will call the callback for every -// wrapped error in addition to the wrapper itself. Since all the top-level -// functions in errwrap use Walk, this means that all those functions work -// with your custom type. -type Wrapper interface { - WrappedErrors() []error -} - -// Wrap defines that outer wraps inner, returning an error type that -// can be cleanly used with the other methods in this package, such as -// Contains, GetAll, etc. -// -// This function won't modify the error message at all (the outer message -// will be used). -func Wrap(outer, inner error) error { - return &wrappedError{ - Outer: outer, - Inner: inner, - } -} - -// Wrapf wraps an error with a formatting message. This is similar to using -// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap -// errors, you should replace it with this. -// -// format is the format of the error message. The string '{{err}}' will -// be replaced with the original error message. -func Wrapf(format string, err error) error { - outerMsg := "" - if err != nil { - outerMsg = err.Error() - } - - outer := errors.New(strings.Replace( - format, "{{err}}", outerMsg, -1)) - - return Wrap(outer, err) -} - -// Contains checks if the given error contains an error with the -// message msg. If err is not a wrapped error, this will always return -// false unless the error itself happens to match this msg. -func Contains(err error, msg string) bool { - return len(GetAll(err, msg)) > 0 -} - -// ContainsType checks if the given error contains an error with -// the same concrete type as v. If err is not a wrapped error, this will -// check the err itself. 
-func ContainsType(err error, v interface{}) bool { - return len(GetAllType(err, v)) > 0 -} - -// Get is the same as GetAll but returns the deepest matching error. -func Get(err error, msg string) error { - es := GetAll(err, msg) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetType is the same as GetAllType but returns the deepest matching error. -func GetType(err error, v interface{}) error { - es := GetAllType(err, v) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetAll gets all the errors that might be wrapped in err with the -// given message. The order of the errors is such that the outermost -// matching error (the most recent wrap) is index zero, and so on. -func GetAll(err error, msg string) []error { - var result []error - - Walk(err, func(err error) { - if err.Error() == msg { - result = append(result, err) - } - }) - - return result -} - -// GetAllType gets all the errors that are the same type as v. -// -// The order of the return value is the same as described in GetAll. -func GetAllType(err error, v interface{}) []error { - var result []error - - var search string - if v != nil { - search = reflect.TypeOf(v).String() - } - Walk(err, func(err error) { - var needle string - if err != nil { - needle = reflect.TypeOf(err).String() - } - - if needle == search { - result = append(result, err) - } - }) - - return result -} - -// Walk walks all the wrapped errors in err and calls the callback. If -// err isn't a wrapped error, this will be called once for err. If err -// is a wrapped error, the callback will be called for both the wrapper -// that implements error as well as the wrapped error itself. -func Walk(err error, cb WalkFunc) { - if err == nil { - return - } - - switch e := err.(type) { - case *wrappedError: - cb(e.Outer) - Walk(e.Inner, cb) - case Wrapper: - cb(err) - - for _, err := range e.WrappedErrors() { - Walk(err, cb) - } - default: - cb(err) - } -} - -// wrappedError is an implementation of error that has both the -// outer and inner errors. -type wrappedError struct { - Outer error - Inner error -} - -func (w *wrappedError) Error() string { - return w.Outer.Error() -} - -func (w *wrappedError) WrappedErrors() []error { - return []error{w.Outer, w.Inner} -} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE deleted file mode 100644 index 82b4de97c7e3..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go deleted file mode 100644 index 00afa9b35162..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -// Append is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. If any of the errs are multierr.Error, they will be flattened -// one level into err. -func Append(err error, errs ...error) *Error { - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Go through each error and flatten - for _, e := range errs { - switch e := e.(type) { - case *Error: - err.Errors = append(err.Errors, e.Errors...) - default: - err.Errors = append(err.Errors, e) - } - } - - return err - default: - newErrs := make([]error, 0, len(errs)+1) - if err != nil { - newErrs = append(newErrs, err) - } - newErrs = append(newErrs, errs...) - - return Append(&Error{}, newErrs...) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go deleted file mode 100644 index aab8e9abec9d..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/flatten.go +++ /dev/null @@ -1,26 +0,0 @@ -package multierror - -// Flatten flattens the given error, merging any *Errors together into -// a single *Error. -func Flatten(err error) error { - // If it isn't an *Error, just return the error as-is - if _, ok := err.(*Error); !ok { - return err - } - - // Otherwise, make the result and flatten away! - flatErr := new(Error) - flatten(err, flatErr) - return flatErr -} - -func flatten(err error, flatErr *Error) { - switch err := err.(type) { - case *Error: - for _, e := range err.Errors { - flatten(e, flatErr) - } - default: - flatErr.Errors = append(flatErr.Errors, err) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go deleted file mode 100644 index bb65a12e743a..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ /dev/null @@ -1,23 +0,0 @@ -package multierror - -import ( - "fmt" - "strings" -) - -// ErrorFormatFunc is a function callback that is called by Error to -// turn the list of errors into a string. 
-type ErrorFormatFunc func([]error) string - -// ListFormatFunc is a basic formatter that outputs the number of errors -// that occurred along with a bullet point list of the errors. -func ListFormatFunc(es []error) string { - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d error(s) occurred:\n\n%s", - len(es), strings.Join(points, "\n")) -} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go deleted file mode 100644 index 2ea082732903..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ /dev/null @@ -1,51 +0,0 @@ -package multierror - -import ( - "fmt" -) - -// Error is an error type to track multiple errors. This is used to -// accumulate errors in cases and return them as a single "error". -type Error struct { - Errors []error - ErrorFormat ErrorFormatFunc -} - -func (e *Error) Error() string { - fn := e.ErrorFormat - if fn == nil { - fn = ListFormatFunc - } - - return fn(e.Errors) -} - -// ErrorOrNil returns an error interface if this Error represents -// a list of errors, or returns nil if the list of errors is empty. This -// function is useful at the end of accumulation to make sure that the value -// returned represents the existence of errors. -func (e *Error) ErrorOrNil() error { - if e == nil { - return nil - } - if len(e.Errors) == 0 { - return nil - } - - return e -} - -func (e *Error) GoString() string { - return fmt.Sprintf("*%#v", *e) -} - -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementatin of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. -// -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implementd only to -// satisfy the errwrap.Wrapper interface. -func (e *Error) WrappedErrors() []error { - return e.Errors -} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go deleted file mode 100644 index 5c477abe44f8..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/prefix.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -// Prefix is a helper function that will prefix some text -// to the given error. If the error is a multierror.Error, then -// it will be prefixed to each wrapped error. -// -// This is useful to use when appending multiple multierrors -// together in order to give better scoping. -func Prefix(err error, prefix string) error { - if err == nil { - return nil - } - - format := fmt.Sprintf("%s {{err}}", prefix) - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Wrap each of the errors - for i, e := range err.Errors { - err.Errors[i] = errwrap.Wrapf(format, e) - } - - return err - default: - return errwrap.Wrapf(format, err) - } -} diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE deleted file mode 100644 index c33dcc7c928c..000000000000 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
“Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
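Editorial aside (not part of the diff): among the vendored packages removed in this change are hashicorp/errwrap and hashicorp/go-multierror, whose deleted sources appear in the hunks above. Based only on those sources, the following is a minimal sketch of how the two are typically combined: errors are accumulated with multierror.Append, annotated with errwrap.Wrapf and multierror.Prefix, and inspected with errwrap.Contains. The step helper and its error text are invented purely for illustration.

package main

import (
    "errors"
    "fmt"

    "github.com/hashicorp/errwrap"
    "github.com/hashicorp/go-multierror"
)

// step is a hypothetical task that always fails, so there is something to wrap.
func step(name string) error {
    return errors.New("file not found")
}

func main() {
    var result *multierror.Error // typed nil is fine; Append initializes it

    // Accumulate independent failures instead of stopping at the first one.
    if err := step("validate config"); err != nil {
        result = multierror.Append(result, errwrap.Wrapf("validating config: {{err}}", err))
    }
    if err := step("load keys"); err != nil {
        result = multierror.Append(result, err)
    }

    // Prefix annotates every accumulated error; ErrorOrNil collapses an
    // empty accumulator back to a plain nil error.
    if err := multierror.Prefix(result.ErrorOrNil(), "startup:"); err != nil {
        fmt.Println(err)
        // Contains walks the wrapped errors, so the original message is still found.
        fmt.Println(errwrap.Contains(err, "file not found")) // true
    }
}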
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go deleted file mode 100644 index d055759611c8..000000000000 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ /dev/null @@ -1,204 +0,0 @@ -package version - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Constraint represents a single constraint for a version, such as -// ">= 1.0". -type Constraint struct { - f constraintFunc - check *Version - original string -} - -// Constraints is a slice of constraints. We make a custom type so that -// we can add methods to it. -type Constraints []*Constraint - -type constraintFunc func(v, c *Version) bool - -var constraintOperators map[string]constraintFunc - -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintFunc{ - "": constraintEqual, - "=": constraintEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "~>": constraintPessimistic, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - -// NewConstraint will parse one or more constraints from the given -// constraint string. The string must be a comma-separated list of -// constraints. -func NewConstraint(v string) (Constraints, error) { - vs := strings.Split(v, ",") - result := make([]*Constraint, len(vs)) - for i, single := range vs { - c, err := parseSingle(single) - if err != nil { - return nil, err - } - - result[i] = c - } - - return Constraints(result), nil -} - -// Check tests if a version satisfies all the constraints. 
-func (cs Constraints) Check(v *Version) bool { - for _, c := range cs { - if !c.Check(v) { - return false - } - } - - return true -} - -// Returns the string format of the constraints -func (cs Constraints) String() string { - csStr := make([]string, len(cs)) - for i, c := range cs { - csStr[i] = c.String() - } - - return strings.Join(csStr, ",") -} - -// Check tests if a constraint is validated by the given version. -func (c *Constraint) Check(v *Version) bool { - return c.f(v, c.check) -} - -func (c *Constraint) String() string { - return c.original -} - -func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) - } - - check, err := NewVersion(matches[2]) - if err != nil { - return nil, err - } - - return &Constraint{ - f: constraintOperators[matches[1]], - check: check, - original: v, - }, nil -} - -func prereleaseCheck(v, c *Version) bool { - switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { - case cPre && vPre: - // A constraint with a pre-release can only match a pre-release version - // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) - - case !cPre && vPre: - // A constraint without a pre-release can only match a version without a - // pre-release. - return false - - case cPre && !vPre: - // OK, except with the pessimistic operator - case !cPre && !vPre: - // OK - } - return true -} - -//------------------------------------------------------------------- -// Constraint functions -//------------------------------------------------------------------- - -func constraintEqual(v, c *Version) bool { - return v.Equal(c) -} - -func constraintNotEqual(v, c *Version) bool { - return !v.Equal(c) -} - -func constraintGreaterThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == 1 -} - -func constraintLessThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == -1 -} - -func constraintGreaterThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) >= 0 -} - -func constraintLessThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) <= 0 -} - -func constraintPessimistic(v, c *Version) bool { - // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases - if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { - return false - } - - // If the version being checked is naturally less than the constraint, then there - // is no way for the version to be valid against the constraint - if v.LessThan(c) { - return false - } - // We'll use this more than once, so grab the length now so it's a little cleaner - // to write the later checks - cs := len(c.segments) - - // If the version being checked has less specificity than the constraint, then there - // is no way for the version to be valid against the constraint - if cs > len(v.segments) { - return false - } - - // Check the segments in the constraint against those in the version. If the version - // being checked, at any point, does not have the same values in each index of the - // constraints segments, then it cannot be valid against the constraint. - for i := 0; i < c.si-1; i++ { - if v.segments[i] != c.segments[i] { - return false - } - } - - // Check the last part of the segment in the constraint. 
If the version segment at - // this index is less than the constraints segment at this index, then it cannot - // be valid against the constraint - if c.segments[cs-1] > v.segments[cs-1] { - return false - } - - // If nothing has rejected the version by now, it's valid - return true -} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go deleted file mode 100644 index 186fd7cc1f34..000000000000 --- a/vendor/github.com/hashicorp/go-version/version.go +++ /dev/null @@ -1,370 +0,0 @@ -package version - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// The compiled regular expression used to test the validity of a version. -var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp -) - -// The raw regular expression string used for testing the validity -// of a version. -const ( - VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` - - // SemverRegexpRaw requires a separator between version and prerelease - SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` -) - -// Version represents a single version. -type Version struct { - metadata string - pre string - segments []int64 - si int - original string -} - -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - -// NewVersion parses the given version and returns a new -// Version. -func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) -} - -// NewSemver parses the given version and returns a new -// Version that adheres strictly to SemVer specs -// https://semver.org/ -func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) -} - -func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { - matches := pattern.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) - } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int64, len(segmentsStr)) - si := 0 - for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version: %s", err) - } - - segments[i] = int64(val) - si++ - } - - // Even though we could support more than three segments, if we - // got less than three, pad it with 0s. This is to cover the basic - // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum - for i := len(segments); i < 3; i++ { - segments = append(segments, 0) - } - - pre := matches[7] - if pre == "" { - pre = matches[4] - } - - return &Version{ - metadata: matches[10], - pre: pre, - segments: segments, - si: si, - original: v, - }, nil -} - -// Must is a helper that wraps a call to a function returning (*Version, error) -// and panics if error is non-nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - - return v -} - -// Compare compares this version to another version. This -// returns -1, 0, or 1 if this version is smaller, equal, -// or larger than the other version, respectively. -// -// If you want boolean results, use the LessThan, Equal, -// or GreaterThan methods. 
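
For context, a brief editor-added sketch of the parsing and comparison entry points deleted above (NewVersion, NewSemver, Must, Compare); the example inputs are hypothetical and the import path mirrors the vendored directory.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Must panics on a parse error, which is convenient for literals.
	a := version.Must(version.NewVersion("1.2.0"))
	b := version.Must(version.NewVersion("1.2.0-beta"))

	// NewSemver is stricter: it requires a "-" separator before prerelease data.
	if _, err := version.NewSemver("1.2.0beta"); err != nil {
		fmt.Println("not valid semver:", err)
	}

	// Compare returns -1, 0, or 1; a release outranks its own prerelease.
	fmt.Println(a.Compare(b))  // 1
	fmt.Println(b.LessThan(a)) // true
}
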
-func (v *Version) Compare(other *Version) int { - // A quick, efficient equality check - if v.String() == other.String() { - return 0 - } - - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { - preSelf := v.Prerelease() - preOther := other.Prerelease() - if preSelf == "" && preOther == "" { - return 0 - } - if preSelf == "" { - return 1 - } - if preOther == "" { - return -1 - } - - return comparePrereleases(preSelf, preOther) - } - - // Get the highest specificity (hS), or if they're equal, just use segmentSelf length - lenSelf := len(segmentsSelf) - lenOther := len(segmentsOther) - hS := lenSelf - if lenSelf < lenOther { - hS = lenOther - } - // Compare the segments - // Because a constraint could have more/less specificity than the version it's - // checking, we need to account for a lopsided or jagged comparison - for i := 0; i < hS; i++ { - if i > lenSelf-1 { - // This means Self had the lower specificity - // Check to see if the remaining segments in Other are all zeros - if !allZero(segmentsOther[i:]) { - // if not, it means that Other has to be greater than Self - return -1 - } - break - } else if i > lenOther-1 { - // this means Other had the lower specificity - // Check to see if the remaining segments in Self are all zeros - - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other - return 1 - } - break - } - lhs := segmentsSelf[i] - rhs := segmentsOther[i] - if lhs == rhs { - continue - } else if lhs < rhs { - return -1 - } - // Otherwis, rhs was > lhs, they're not equal - return 1 - } - - // if we got this far, they're equal - return 0 -} - -func allZero(segs []int64) bool { - for _, s := range segs { - if s != 0 { - return false - } - } - return true -} - -func comparePart(preSelf string, preOther string) int { - if preSelf == preOther { - return 0 - } - - var selfInt int64 - selfNumeric := true - selfInt, err := strconv.ParseInt(preSelf, 10, 64) - if err != nil { - selfNumeric = false - } - - var otherInt int64 - otherNumeric := true - otherInt, err = strconv.ParseInt(preOther, 10, 64) - if err != nil { - otherNumeric = false - } - - // if a part is empty, we use the other to decide - if preSelf == "" { - if otherNumeric { - return -1 - } - return 1 - } - - if preOther == "" { - if selfNumeric { - return 1 - } - return -1 - } - - if selfNumeric && !otherNumeric { - return -1 - } else if !selfNumeric && otherNumeric { - return 1 - } else if !selfNumeric && !otherNumeric && preSelf > preOther { - return 1 - } else if selfInt > otherInt { - return 1 - } - - return -1 -} - -func comparePrereleases(v string, other string) int { - // the same pre release! 
- if v == other { - return 0 - } - - // split both pre releases for analyse their parts - selfPreReleaseMeta := strings.Split(v, ".") - otherPreReleaseMeta := strings.Split(other, ".") - - selfPreReleaseLen := len(selfPreReleaseMeta) - otherPreReleaseLen := len(otherPreReleaseMeta) - - biggestLen := otherPreReleaseLen - if selfPreReleaseLen > otherPreReleaseLen { - biggestLen = selfPreReleaseLen - } - - // loop for parts to find the first difference - for i := 0; i < biggestLen; i = i + 1 { - partSelfPre := "" - if i < selfPreReleaseLen { - partSelfPre = selfPreReleaseMeta[i] - } - - partOtherPre := "" - if i < otherPreReleaseLen { - partOtherPre = otherPreReleaseMeta[i] - } - - compare := comparePart(partSelfPre, partOtherPre) - // if parts are equals, continue the loop - if compare != 0 { - return compare - } - } - - return 0 -} - -// Equal tests if two versions are equal. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// GreaterThan tests if this version is greater than another version. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// LessThan tests if this version is less than another version. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// Metadata returns any metadata that was part of the version -// string. -// -// Metadata is anything that comes after the "+" in the version. -// For example, with "1.2.3+beta", the metadata is "beta". -func (v *Version) Metadata() string { - return v.metadata -} - -// Prerelease returns any prerelease data that is part of the version, -// or blank if there is no prerelease data. -// -// Prerelease information is anything that comes after the "-" in the -// version (but before any metadata). For example, with "1.2.3-beta", -// the prerelease information is "beta". -func (v *Version) Prerelease() string { - return v.pre -} - -// Segments returns the numeric segments of the version as a slice of ints. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments() []int { - segmentSlice := make([]int, len(v.segments)) - for i, v := range v.segments { - segmentSlice[i] = int(v) - } - return segmentSlice -} - -// Segments64 returns the numeric segments of the version as a slice of int64s. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments64() []int64 { - result := make([]int64, len(v.segments)) - copy(result, v.segments) - return result -} - -// String returns the full version string included pre-release -// and metadata information. -// -// This value is rebuilt according to the parsed segments and other -// information. Therefore, ambiguities in the version string such as -// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and -// missing parts (1.0 => 1.0.0) will be made into a canonicalized form -// as shown in the parenthesized examples. 
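
A small editor-added sketch of the accessors and the canonicalizing String method documented just above (Segments, Prerelease, Metadata, String, Original); the version literal is arbitrary and only illustrates the padding and prefix handling described in the comments.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	v := version.Must(version.NewVersion("v1.04-beta+exp.sha.5114f85"))

	fmt.Println(v.Segments())   // [1 4 0] -- padded to three segments
	fmt.Println(v.Prerelease()) // beta
	fmt.Println(v.Metadata())   // exp.sha.5114f85
	fmt.Println(v.String())     // 1.4.0-beta+exp.sha.5114f85 (canonicalized)
	fmt.Println(v.Original())   // v1.04-beta+exp.sha.5114f85 (as parsed)
}
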
-func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) - for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str - } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original parsed version as-is, including any -// potential whitespace, `v` prefix, etc. -func (v *Version) Original() string { - return v.original -} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go deleted file mode 100644 index cc888d43e6b6..000000000000 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -// Collection is a type that implements the sort.Interface interface -// so that versions can be sorted. -type Collection []*Version - -func (v Collection) Len() int { - return len(v) -} - -func (v Collection) Less(i, j int) bool { - return v[i].LessThan(v[j]) -} - -func (v Collection) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go deleted file mode 100644 index 337d963296cd..000000000000 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ /dev/null @@ -1,212 +0,0 @@ -package lru - -import ( - "fmt" - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // Default2QRecentRatio is the ratio of the 2Q cache dedicated - // to recently added entries that have only been accessed once. - Default2QRecentRatio = 0.25 - - // Default2QGhostEntries is the default ratio of ghost - // entries kept to track entries recently evicted - Default2QGhostEntries = 0.50 -) - -// TwoQueueCache is a thread-safe fixed size 2Q cache. -// 2Q is an enhancement over the standard LRU cache -// in that it tracks both frequently and recently used -// entries separately. This avoids a burst in access to new -// entries from evicting frequently used entries. It adds some -// additional tracking overhead to the standard LRU cache, and is -// computationally about 2x the cost, and adds some metadata over -// head. The ARCCache is similar, but does not require setting any -// parameters. -type TwoQueueCache struct { - size int - recentSize int - - recent *simplelru.LRU - frequent *simplelru.LRU - recentEvict *simplelru.LRU - lock sync.RWMutex -} - -// New2Q creates a new TwoQueueCache using the default -// values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) -} - -// New2QParams creates a new TwoQueueCache using the provided -// parameter values. 
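
An illustrative, editor-added sketch of the 2Q cache constructor and methods being removed here, assuming the vendored golang-lru import path; the size and keys are arbitrary.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New2Q sizes the recent and ghost queues from the default ratios (0.25 / 0.50).
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)

	// A Get promotes "a" from the recent queue to the frequent queue.
	if v, ok := cache.Get("a"); ok {
		fmt.Println(v) // 1
	}

	fmt.Println(cache.Contains("b"), cache.Len()) // true 2
}
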
-func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { - if size <= 0 { - return nil, fmt.Errorf("invalid size") - } - if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") - } - if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") - } - - // Determine the sub-sizes - recentSize := int(float64(size) * recentRatio) - evictSize := int(float64(size) * ghostRatio) - - // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - frequent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - recentEvict, err := simplelru.NewLRU(evictSize, nil) - if err != nil { - return nil, err - } - - // Initialize the cache - c := &TwoQueueCache{ - size: size, - recentSize: recentSize, - recent: recent, - frequent: frequent, - recentEvict: recentEvict, - } - return c, nil -} - -func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if this is a frequent value - if val, ok := c.frequent.Get(key); ok { - return val, ok - } - - // If the value is contained in recent, then we - // promote it to frequent - if val, ok := c.recent.Peek(key); ok { - c.recent.Remove(key) - c.frequent.Add(key, val) - return val, ok - } - - // No hit - return nil, false -} - -func (c *TwoQueueCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is frequently used already, - // and just update the value - if c.frequent.Contains(key) { - c.frequent.Add(key, value) - return - } - - // Check if the value is recently used, and promote - // the value into the frequent list - if c.recent.Contains(key) { - c.recent.Remove(key) - c.frequent.Add(key, value) - return - } - - // If the value was recently evicted, add it to the - // frequently used list - if c.recentEvict.Contains(key) { - c.ensureSpace(true) - c.recentEvict.Remove(key) - c.frequent.Add(key, value) - return - } - - // Add to the recently seen list - c.ensureSpace(false) - c.recent.Add(key, value) - return -} - -// ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { - // If we have space, nothing to do - recentLen := c.recent.Len() - freqLen := c.frequent.Len() - if recentLen+freqLen < c.size { - return - } - - // If the recent buffer is larger than - // the target, evict from there - if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { - k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) - return - } - - // Remove from the frequent list otherwise - c.frequent.RemoveOldest() -} - -func (c *TwoQueueCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.recent.Len() + c.frequent.Len() -} - -func (c *TwoQueueCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.frequent.Keys() - k2 := c.recent.Keys() - return append(k1, k2...) 
-} - -func (c *TwoQueueCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.frequent.Remove(key) { - return - } - if c.recent.Remove(key) { - return - } - if c.recentEvict.Remove(key) { - return - } -} - -func (c *TwoQueueCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.recent.Purge() - c.frequent.Purge() - c.recentEvict.Purge() -} - -func (c *TwoQueueCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.frequent.Contains(key) || c.recent.Contains(key) -} - -func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.frequent.Peek(key); ok { - return val, ok - } - return c.recent.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4dfb609..000000000000 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. 
"You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. 
You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go deleted file mode 100644 index a2a252817333..000000000000 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ /dev/null @@ -1,257 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). -// ARC is an enhancement over the standard LRU cache in that tracks both -// frequency and recency of use. This avoids a burst in access to new -// entries from evicting the frequently used older entries. It adds some -// additional tracking overhead to a standard LRU cache, computationally -// it is roughly 2x the cost, and the extra memory overhead is linear -// with the size of the cache. ARC has been patented by IBM, but is -// similar to the TwoQueueCache (2Q) which requires setting parameters. 
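
For reference, a minimal editor-added sketch of the ARC cache API whose implementation is deleted below (NewARC, Add, Get, Peek); the key and value are hypothetical, and the import path matches the vendored directory.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewARC needs no tuning parameters; the split between the recent (T1)
	// and frequent (T2) lists adapts as the cache is used.
	cache, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}

	cache.Add("config", []byte("..."))

	if v, ok := cache.Get("config"); ok {
		fmt.Printf("%s\n", v)
	}

	// Peek inspects a value without touching recency or frequency.
	_, ok := cache.Peek("missing")
	fmt.Println(ok) // false
}
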
-type ARCCache struct { - size int // Size is the total capacity of the cache - p int // P is the dynamic preference towards T1 or T2 - - t1 *simplelru.LRU // T1 is the LRU for recently accessed items - b1 *simplelru.LRU // B1 is the LRU for evictions from t1 - - t2 *simplelru.LRU // T2 is the LRU for frequently accessed items - b2 *simplelru.LRU // B2 is the LRU for evictions from t2 - - lock sync.RWMutex -} - -// NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { - // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - b2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - - // Initialize the ARC - c := &ARCCache{ - size: size, - p: 0, - t1: t1, - b1: b1, - t2: t2, - b2: b2, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Ff the value is contained in T1 (recent), then - // promote it to T2 (frequent) - if val, ok := c.t1.Peek(key); ok { - c.t1.Remove(key) - c.t2.Add(key, val) - return val, ok - } - - // Check if the value is contained in T2 (frequent) - if val, ok := c.t2.Get(key); ok { - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. -func (c *ARCCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is contained in T1 (recent), and potentially - // promote it to frequent T2 - if c.t1.Contains(key) { - c.t1.Remove(key) - c.t2.Add(key, value) - return - } - - // Check if the value is already in T2 (frequent) and update it - if c.t2.Contains(key) { - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // recently used list - if c.b1.Contains(key) { - // T1 set is too small, increase P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b2Len > b1Len { - delta = b2Len / b1Len - } - if c.p+delta >= c.size { - c.p = c.size - } else { - c.p += delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Remove from B1 - c.b1.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // frequently used list - if c.b2.Contains(key) { - // T2 set is too small, decrease P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b1Len > b2Len { - delta = b1Len / b2Len - } - if delta >= c.p { - c.p = 0 - } else { - c.p -= delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(true) - } - - // Remove from B2 - c.b2.Remove(key) - - // Add the key to the frequntly used list - c.t2.Add(key, value) - return - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Keep the size of the ghost buffers trim - if c.b1.Len() > c.size-c.p { - c.b1.RemoveOldest() - } - if c.b2.Len() > c.p { - c.b2.RemoveOldest() - } - - // Add to the recently seen list - c.t1.Add(key, value) - return -} - -// replace is used to adaptively evict from either T1 or T2 -// based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { - t1Len 
:= c.t1.Len() - if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { - k, _, ok := c.t1.RemoveOldest() - if ok { - c.b1.Add(k, nil) - } - } else { - k, _, ok := c.t2.RemoveOldest() - if ok { - c.b2.Add(k, nil) - } - } -} - -// Len returns the number of cached entries -func (c *ARCCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Len() + c.t2.Len() -} - -// Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.t1.Keys() - k2 := c.t2.Keys() - return append(k1, k2...) -} - -// Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.t1.Remove(key) { - return - } - if c.t2.Remove(key) { - return - } - if c.b1.Remove(key) { - return - } - if c.b2.Remove(key) { - return - } -} - -// Purge is used to clear the cache -func (c *ARCCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.t1.Purge() - c.t2.Purge() - c.b1.Purge() - c.b2.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Contains(key) || c.t2.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. -func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.t1.Peek(key); ok { - return val, ok - } - return c.t2.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go deleted file mode 100644 index a6285f989e0a..000000000000 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ /dev/null @@ -1,114 +0,0 @@ -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru *simplelru.LRU - lock sync.RWMutex -} - -// New creates an LRU of the given size -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) -} - -// NewWithEvict constructs a fixed size cache with the given eviction -// callback. -func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { - lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) - if err != nil { - return nil, err - } - c := &Cache{ - lru: lru, - } - return c, nil -} - -// Purge is used to completely clear the cache -func (c *Cache) Purge() { - c.lock.Lock() - c.lru.Purge() - c.lock.Unlock() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) bool { - c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Add(key, value) -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Get(key) -} - -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *Cache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Contains(key) -} - -// Returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. 
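
A short editor-added sketch of the thread-safe Cache wrapper removed above, showing the eviction callback taken by NewWithEvict; the capacity and keys are arbitrary.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict registers a callback that fires whenever an entry
	// is evicted to make room for a newer one.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	evicted := cache.Add("c", 3) // pushes out "a", the oldest entry

	fmt.Println(evicted, cache.Keys()) // true [b c]
}
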
-func (c *Cache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Peek(key) -} - -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { - c.lock.Lock() - defer c.lock.Unlock() - - if c.lru.Contains(key) { - return true, false - } else { - evict := c.lru.Add(key, value) - return false, evict - } -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) { - c.lock.Lock() - c.lru.Remove(key) - c.lock.Unlock() -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - c.lock.Lock() - c.lru.RemoveOldest() - c.lock.Unlock() -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Keys() -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Len() -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 68d097a1c06e..000000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,160 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occured. -func (c *LRU) Add(key, value interface{}) bool { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. 
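
An illustrative, editor-added sketch of using the non-thread-safe simplelru core directly (NewLRU, Add, Get, Len), as the wrappers above do internally; callers of this package are expected to supply their own locking. The sizes and keys are arbitrary.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	cache, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	})
	if err != nil {
		panic(err)
	}

	cache.Add(1, "one")
	cache.Add(2, "two")
	cache.Add(3, "three") // evicts key 1, the least recently used entry

	if _, ok := cache.Get(1); !ok {
		fmt.Println("key 1 is gone")
	}
	fmt.Println(cache.Len()) // 2
}
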
-func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) bool { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (interface{}, interface{}, bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c928c..000000000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. 
“Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. 
You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
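As an illustration of the notice placement described above, the Exhibit A text is normally included verbatim as a header comment at the top of each covered source file; in a Go file it would typically look like the following (a sketch, not itself part of the license text):

    // This Source Code Form is subject to the terms of the Mozilla Public
    // License, v. 2.0. If a copy of the MPL was not distributed with this
    // file, You can obtain one at http://mozilla.org/MPL/2.0/.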
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index cfddbf3605f5..000000000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,659 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. 
- if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(int(v))) - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(int(v))) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. 
- if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. - if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. 
- done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: 
fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - field.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. 
- filter := list.Filter(fieldName) - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, field); err != nil { - return err - } - } - - decodedFields = append(decodedFields, fieldType.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50b5c..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 692ac24e2911..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,217 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. 
-// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. 
Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // associated line comment, only when used in a list - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b022..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. -func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381dfbf1..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. 
-type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index f46ed4cc0330..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,489 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. 
when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. 
this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - fmt.Println("illegal") - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. 
- if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // If there is no error, we should be at a RBRACE to end the object - if p.tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. 
- p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index b20416539e75..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,640 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). 
-func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the 
character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 956c8991c343..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,244 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. 
- for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section, except for escaped backslashes, which we handle specifically. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - // We special case escaped backslashes in interpolations, converting - // them to their unescaped equivalents. - if r == '\\' { - q, _ := utf8.DecodeRuneInString(s) - switch q { - case '\\': - continue - } - } - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case 
'\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d4a4..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664ecd3..000000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // All the elements of this object must also be objects! 
- for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 3a62ec3f6c49..000000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,303 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. 
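// Editor's illustrative sketch (not part of the vendored file): per the Parse
// documentation above, the JSON root must be an object and the returned
// *ast.File carries the flattened object list in f.Node. The example package
// name and input are hypothetical; import paths match this file's own imports.
package jsonparsersketch

import (
	"fmt"
	"log"

	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func ExampleParse() {
	f, err := jsonparser.Parse([]byte(`{"name": "minikube", "replicas": 2}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", f.Node) // *ast.ObjectList, flattened to resemble HCL's AST
}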
- if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - fmt.Println("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? 
Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index 477f71ff3db8..000000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
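// Editor's illustrative sketch (not part of the vendored file): the scanner is
// built with New and driven by repeated Scan calls until token.EOF, per the API
// in this file. The example package name and input are hypothetical; import
// paths match this file's own imports.
package jsonscannersketch

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func ExampleScan() {
	sc := scanner.New([]byte(`{"enabled": true}`))
	for tok := sc.Scan(); tok.Type != token.EOF; tok = sc.Scan() {
		fmt.Println(tok.Type, tok.Text)
	}
}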
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given 
rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. -func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. 
For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d4a4..000000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. 
-type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee653..000000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c2928a5..000000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4cee2..000000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hooklift/iso9660/LICENSE b/vendor/github.com/hooklift/iso9660/LICENSE deleted file mode 100644 index 52d135112eeb..000000000000 --- a/vendor/github.com/hooklift/iso9660/LICENSE +++ /dev/null @@ -1,374 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. 
"Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. 
-Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. 
Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. 
Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. 
- -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hooklift/iso9660/iso9660.go b/vendor/github.com/hooklift/iso9660/iso9660.go deleted file mode 100644 index c5a114cd91ef..000000000000 --- a/vendor/github.com/hooklift/iso9660/iso9660.go +++ /dev/null @@ -1,418 +0,0 @@ -// Package iso9660 implements ECMA-119 standard, also known as ISO 9660. -// -// References: -// -// * https://en.wikipedia.org/wiki/ISO_9660 -// -// * http://alumnus.caltech.edu/~pje/iso9660.html -// -// * http://users.telenet.be/it3.consultants.bvba/handouts/ISO9960.html -// -// * http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-119.pdf -// -// * http://www.drdobbs.com/database/inside-the-iso-9660-filesystem-format/184408899 -// -// * http://www.cdfs.com -// -// * http://wiki.osdev.org/ISO_9660 -package iso9660 - -import ( - "io" - "os" - "strings" - "time" -) - -// File represents a concrete implementation of os.FileInfo interface for -// accessing ISO 9660 file data -type File struct { - DirectoryRecord - fileID string - // We have the raw image here only to be able to access file extents - image *os.File -} - -// Name returns the file's name. -func (f *File) Name() string { - name := strings.Split(f.fileID, ";")[0] - return strings.ToLower(name) -} - -// Size returns the file size in bytes -func (f *File) Size() int64 { - return int64(f.ExtentLengthBE) -} - -// Mode returns file's mode and permissions bits. Since we don't yet support -// Rock Ridge extensions we cannot extract POSIX permissions and the rest of the -// normal metadata. So, right we return 0740 for directories and 0640 for files. -func (f *File) Mode() os.FileMode { - if f.IsDir() { - return os.FileMode(0740) - } - return os.FileMode(0640) -} - -// ModTime returns file's modification time. -func (f *File) ModTime() time.Time { - return time.Now() -} - -// IsDir tells whether the file is a directory or not. 
-func (f *File) IsDir() bool { - if (f.FileFlags & 2) == 2 { - return true - } - return false -} - -// Sys returns io.Reader instance pointing to the file's content if it is not a directory, nil otherwise. -func (f *File) Sys() interface{} { - if f.IsDir() { - return nil - } - - return io.NewSectionReader(f.image, int64(f.ExtentLocationBE*sectorSize), int64(f.ExtentLengthBE)) -} - -const ( - bootRecord = 0 - primaryVol = 1 - supplementaryVol = 2 - volPartition = 3 - volSetTerminator = 255 - // System area goes from sectors 0x00 to 0x0F. Volume descriptors can be - // found starting at sector 0x10 - dataAreaSector = 0x10 - sectorSize = 2048 -) - -// VolumeDescriptor identify the volume, the partitions recorded on the volume, -// the volume creator(s), certain attributes of the volume, the location of -// other recorded descriptors and the version of the standard which applies -// to the volume descriptor. -// -// When preparing to mount a CD, your first action will be reading the volume -// descriptors (specifically, you will be looking for the Primary Volume Descriptor). -// Since sectors 0x00-0x0F of the CD are reserved as System Area, the Volume -// Descriptors can be found starting at sector 0x10. -type VolumeDescriptor struct { - // 0: BootRecord - // 1: Primary Volume Descriptor - // 2: Supplementary Volume Descriptor - // 3: Volume Partition Descriptor - // 4-254: Reserved - // 255: Volume Descriptor Set Terminator - Type byte - // Always "CD001". - StandardID [5]byte - //Volume Descriptor Version (0x01). - Version byte -} - -// BootRecord identifies a system which can recognize and act upon the content -// of the field reserved for boot system use in the Boot Record, and shall -// contain information which is used to achieve a specific state for a system or -// for an application. -type BootRecord struct { - VolumeDescriptor - SystemID [32]byte - ID [32]byte - SystemUse [1977]byte -} - -// Terminator indicates the termination of a Volume Descriptor Set. -type Terminator struct { - VolumeDescriptor - // All bytes of this field are set to (00). - Reserved [2041]byte -} - -// PrimaryVolumePart1 represents the Primary Volume Descriptor first half, before the -// root directory record. We are only reading big-endian values so placeholders -// are used for little-endian ones. -type PrimaryVolumePart1 struct { - VolumeDescriptor - // Unused - _ byte // 00 - // The name of the system that can act upon sectors 0x00-0x0F for the volume. - SystemID [32]byte - // Identification of this volume. - ID [32]byte - //Unused2 - _ [8]byte - // Amount of data available on the CD-ROM. Ignores little-endian order. - // Takes big-endian encoded value. - VolumeSpaceSizeLE int32 - VolumeSpaceSizeBE int32 - Unused2 [32]byte - // The size of the set in this logical volume (number of disks). Ignores - // little-endian order. Takes big-endian encoded value. - VolumeSetSizeLE int16 - VolumeSetSizeBE int16 - // The number of this disk in the Volume Set. Ignores little-endian order. - // Takes big-endian encoded value. - VolumeSeqNumberLE int16 - VolumeSeqNumberBE int16 - // The size in bytes of a logical block. NB: This means that a logical block - // on a CD could be something other than 2 KiB! - LogicalBlkSizeLE int16 - LogicalBlkSizeBE int16 - // The size in bytes of the path table. Ignores little-endian order. - // Takes big-endian encoded value. - PathTableSizeLE int32 - PathTableSizeBE int32 - // LBA location of the path table. The path table pointed to contains only - // little-endian values. 
- LocPathTableLE int32 - // LBA location of the optional path table. The path table pointed to contains - // only little-endian values. Zero means that no optional path table exists. - LocOptPathTableLE int32 - // LBA location of the path table. The path table pointed to contains - // only big-endian values. - LocPathTableBE int32 - // LBA location of the optional path table. The path table pointed to contains - // only big-endian values. Zero means that no optional path table exists. - LocOptPathTableBE int32 -} - -// DirectoryRecord describes the characteristics of a file or directory, -// beginning with a length octet describing the size of the entire entry. -// Entries themselves are of variable length, up to 255 octets in size. -// Attributes for the file described by the directory entry are stored in the -// directory entry itself (unlike UNIX). -// The root directory entry is a variable length object, so that the name can be of variable length. -// -// Important: before each entry there can be "fake entries" to support the Long File Name. -// -// Even if a directory spans multiple sectors, the directory entries are not -// permitted to cross the sector boundary (unlike the path table). Where there -// is not enough space to record an entire directory entry at the end of a sector, -// that sector is zero-padded and the next consecutive sector is used. -// Unfortunately, the date/time format is different from that used in the Primary -// Volume Descriptor. -type DirectoryRecord struct { - // Extended Attribute Record length, stored at the beginning of - // the file's extent. - ExtendedAttrLen byte - // Location of extent (Logical Block Address) in both-endian format. - ExtentLocationLE uint32 - ExtentLocationBE uint32 - // Data length (size of extent) in both-endian format. - ExtentLengthLE uint32 - ExtentLengthBE uint32 - // Date and the time of the day at which the information in the Extent - // described by the Directory Record was recorded. - RecordedTime [7]byte - // If this Directory Record identifies a directory then bit positions 2, 3 - // and 7 shall be set to ZERO. If no Extended Attribute Record is associated - // with the File Section identified by this Directory Record then bit - // positions 3 and 4 shall be set to ZERO. -- 9.1.6 - FileFlags byte - // File unit size for files recorded in interleaved mode, zero otherwise. - FileUnitSize byte - // Interleave gap size for files recorded in interleaved mode, zero otherwise. - InterleaveGapSize byte - // Volume sequence number - the volume that this extent is recorded on, in - // 16 bit both-endian format. - VolumeSeqNumberLE uint16 - VolumeSeqNumberBE uint16 - // Length of file identifier (file name). This terminates with a ';' - // character followed by the file ID number in ASCII coded decimal ('1'). - FileIDLength byte - // The interpretation of this field depends as follows on the setting of the - // Directory bit of the File Flags field. If set to ZERO, it shall mean: - // - // − The field shall specify an identification for the file. - // − The characters in this field shall be d-characters or d1-characters, SEPARATOR 1, SEPARATOR 2. - // − The field shall be recorded as specified in 7.5. If set to ONE, it shall mean: - // − The field shall specify an identification for the directory. - // − The characters in this field shall be d-characters or d1-characters, or only a (00) byte, or only a (01) byte. - // − The field shall be recorded as specified in 7.6. 
- // fileID string -} - -// PrimaryVolumePart2 represents the Primary Volume Descriptor half after the -// root directory record. -type PrimaryVolumePart2 struct { - // Identifier of the volume set of which this volume is a member. - VolumeSetID [128]byte - // The volume publisher. For extended publisher information, the first byte - // should be 0x5F, followed by the filename of a file in the root directory. - // If not specified, all bytes should be 0x20. - PublisherID [128]byte - // The identifier of the person(s) who prepared the data for this volume. - // For extended preparation information, the first byte should be 0x5F, - // followed by the filename of a file in the root directory. If not specified, - // all bytes should be 0x20. - DataPreparerID [128]byte - // Identifies how the data are recorded on this volume. For extended information, - // the first byte should be 0x5F, followed by the filename of a file in the root - // directory. If not specified, all bytes should be 0x20. - AppID [128]byte - // Filename of a file in the root directory that contains copyright - // information for this volume set. If not specified, all bytes should be 0x20. - CopyrightFileID [37]byte - // Filename of a file in the root directory that contains abstract information - // for this volume set. If not specified, all bytes should be 0x20. - AbstractFileID [37]byte - // Filename of a file in the root directory that contains bibliographic - // information for this volume set. If not specified, all bytes should be 0x20. - BibliographicFileID [37]byte - // The date and time of when the volume was created. - CreationTime [17]byte - // The date and time of when the volume was modified. - ModificationTime [17]byte - // The date and time after which this volume is considered to be obsolete. - // If not specified, then the volume is never considered to be obsolete. - ExpirationTime [17]byte - // The date and time after which the volume may be used. If not specified, - // the volume may be used immediately. - EffectiveTime [17]byte - // The directory records and path table version (always 0x01). - FileStructVersion byte - // Reserved. Always 0x00. - _ byte - // Contents not defined by ISO 9660. - AppUse [512]byte - // Reserved by ISO. - _ [653]byte -} - -// PrimaryVolume descriptor acts much like the superblock of the UNIX filesystem, providing -// details on the ISO-9660 compliant portions of the disk. While we can have -// many kinds of filesystems on a single ISO-9660 CD-ROM, we can have only one -// ISO-9660 file structure (found as the primary volume-descriptor type). -// -// Directory entries are successively stored within this region. Evaluation of -// the ISO 9660 filenames is begun at this location. The root directory is stored -// as an extent, or sequential series of sectors, that contains each of the -// directory entries appearing in the root. -// -// Since ISO 9660 works by segmenting the CD-ROM into logical blocks, the size -// of these blocks is found in the primary volume descriptor as well. -type PrimaryVolume struct { - PrimaryVolumePart1 - DirectoryRecord DirectoryRecord - PrimaryVolumePart2 -} - -// SupplementaryVolume is used by Joliet. 
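// Editor's illustrative sketch (not part of the vendored file): per the Sys,
// IsDir, and Name comments above, a non-directory file's bytes are exposed
// through the io.Reader returned by Sys(). The helper and package name below
// are hypothetical.
package iso9660sketch

import (
	"fmt"
	"io"

	"github.com/hooklift/iso9660"
)

// extractFile copies the contents of a non-directory *iso9660.File into dst.
func extractFile(f *iso9660.File, dst io.Writer) error {
	if f.IsDir() {
		return nil // directories expose no content reader here
	}
	r, ok := f.Sys().(io.Reader)
	if !ok {
		return fmt.Errorf("unexpected Sys() value for %s", f.Name())
	}
	_, err := io.Copy(dst, r)
	return err
}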
-type SupplementaryVolume struct { - VolumeDescriptor - Flags int `struc:"int8"` - SystemID string `struc:"[32]byte"` - ID string `struc:"[32]byte"` - Unused byte - VolumeSpaceSize int `struc:"int32"` - EscapeSequences string `struc:"[32]byte"` - VolumeSetSize int `struc:"int16"` - VolumeSeqNumber int `struc:"int16"` - LogicalBlkSize int `struc:"int16"` - PathTableSize int `struc:"int32"` - LocLPathTable int `struc:"int32"` - LocOptLPathTable int `struc:"int32"` - LocMPathTable int `struc:"int32"` - LocOptMPathTable int `struc:"int32"` - RootDirRecord DirectoryRecord - VolumeSetID string `struc:"[128]byte"` - PublisherID string `struc:"[128]byte"` - DataPreparerID string `struc:"[128]byte"` - AppID string `struc:"[128]byte"` - CopyrightFileID string `struc:"[37]byte"` - AbstractFileID string `struc:"[37]byte"` - BibliographicFileID string `struc:"[37]byte"` - CreationTime Timestamp - ModificationTime Timestamp - ExpirationTime Timestamp - EffectiveTime Timestamp - FileStructVersion int `struc:"int8"` - Reserved byte - AppData [512]byte - Reserved2 byte -} - -// PartitionVolume ... -type PartitionVolume struct { - VolumeDescriptor - Unused byte - SystemID string `struc:"[32]byte"` - ID string `struc:"[32]byte"` - Location int `struc:"int8"` - Size int `struc:"int8"` - SystemUse [1960]byte -} - -// Timestamp ... -type Timestamp struct { - Year int `struc:"[4]byte"` - Month int `struc:"[2]byte"` - DayOfMonth int `struc:"[2]byte"` - Hour int `struc:"[2]byte"` - Minute int `struc:"[2]byte"` - Second int `struc:"[2]byte"` - Millisecond int `struc:"[2]byte"` - GMTOffset int `struc:"uint8"` -} - -// ExtendedAttrRecord are simply a way to extend the attributes of files. -// Since attributes vary according to the user, most everyone has a different -// opinion on what a file attribute should specify. -type ExtendedAttrRecord struct { - OwnerID int `struc:"int16"` - GroupID int `struc:"int16"` - Permissions int `struc:"int16"` - CreationTime Timestamp - ModificationTime Timestamp - ExpirationTime Timestamp - // Specifies the date and the time of the day at which the information in - // the file may be used. If the date and time are not specified then the - // information may be used at once. - EffectiveTime Timestamp - Format int `struc:"uint8"` - Attributes int `struc:"uint8"` - Length int `struc:"int16"` - SystemID string `struc:"[32]byte"` - SystemUse [64]byte - Version int `struc:"uint8"` - EscapeSeqLength int `struc:"uint8"` - Reserved [64]byte - AppUseLength int `struc:"int16,sizeof=AppUse"` - AppUse []byte - EscapeSequences []byte `struc:"sizefrom=AppUseLength"` -} - -// PathTable contains a well-ordered sequence of records describing every -// directory extent on the CD. There are some exceptions with this: the Path -// Table can only contain 65536 records, due to the length of the "Parent Directory Number" field. -// If there are more than this number of directories on the disc, some CD -// authoring software will ignore this limit and create a non-compliant -// CD (this applies to some earlier versions of Nero, for example). If your -// file system uses the path table, you should be aware of this possibility. -// Windows uses the Path Table and will fail with such non-compliant -// CD's (additional nodes exist but appear as zero-byte). Linux, which uses -// the directory tables is not affected by this issue. The location of the path -// tables can be found in the Primary Volume Descriptor. There are two table types -// - the L-Path table (relevant to x86) and the M-Path table. 
The only -// difference between these two tables is that multi-byte values in the L-Table -// are LSB-first and the values in the M-Table are MSB-first. -// -// The path table is in ascending order of directory level and is alphabetically -// sorted within each directory level. -type PathTable struct { - DirIDLength int `struc:"uint8,sizeof=DirName"` - ExtendedAttrsLen int `struc:"uint8"` - // Number the Logical Block Number of the first Logical Block allocated to - // the Extent in which the directory is recorded. - // This is in a different format depending on whether this is the L-Table or - // M-Table (see explanation above). - ExtentLocation int `struc:"int32"` - // Number of record for parent directory (or 1 for the root directory), as a - // word; the first record is number 1, the second record is number 2, etc. - // Directory number of parent directory (an index in to the path table). - // This is the field that limits the table to 65536 records. - ParentDirNumber int `struc:"int16"` - // Directory Identifier (name) in d-characters. - DirName string -} diff --git a/vendor/github.com/hooklift/iso9660/reader.go b/vendor/github.com/hooklift/iso9660/reader.go deleted file mode 100644 index 1c7d00a66cc5..000000000000 --- a/vendor/github.com/hooklift/iso9660/reader.go +++ /dev/null @@ -1,251 +0,0 @@ -package iso9660 - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/c4milo/gotoolkit" -) - -var ( - // ErrInvalidImage is returned when an attempt to unpack the image Primary Volume Descriptor failed or - // when the end of the image was reached without finding a primary volume descriptor - ErrInvalidImage = func(err error) error { return fmt.Errorf("invalid-iso9660-image: %s", err) } - // ErrCorruptedImage is returned when a seek operation, on the image, failed. - ErrCorruptedImage = func(err error) error { return fmt.Errorf("corrupted-image: %s", err) } -) - -// Reader defines the state of the ISO9660 image reader. It needs to be instantiated -// from its constructor. -type Reader struct { - // File descriptor to the opened ISO image - image *os.File - // Copy of unencoded Primary Volume Descriptor - pvd PrimaryVolume - // Queue used to walk through file system iteratively - queue gotoolkit.Queue - // Current sector - sector uint32 - // Current bytes read from current sector. - read uint32 -} - -// NewReader creates a new ISO 9660 image reader. -func NewReader(rs *os.File) (*Reader, error) { - // Starts reading from image data area - sector := dataAreaSector - // Iterates over volume descriptors until it finds the primary volume descriptor - // or an error condition. - for { - offset, err := rs.Seek(int64(sector*sectorSize), os.SEEK_SET) - if err != nil { - return nil, ErrCorruptedImage(err) - } - - var volDesc VolumeDescriptor - if err := binary.Read(rs, binary.BigEndian, &volDesc); err != nil { - return nil, ErrCorruptedImage(err) - } - - if volDesc.Type == primaryVol { - // backs up to the beginning of the sector again in order to unpack - // the entire primary volume descriptor more easily. - if _, err := rs.Seek(offset, os.SEEK_SET); err != nil { - return nil, ErrCorruptedImage(err) - } - - reader := new(Reader) - reader.image = rs - reader.queue = new(gotoolkit.SliceQueue) - - if err := reader.unpackPVD(); err != nil { - return nil, ErrCorruptedImage(err) - } - - return reader, nil - } - - if volDesc.Type == volSetTerminator { - return nil, ErrInvalidImage(errors.New("Volume Set Terminator reached. 
A Primary Volume Descriptor was not found.")) - } - sector++ - } -} - -// Skip skips the given number of directory records. -func (r *Reader) Skip(n int) error { - var drecord File - var len byte - var err error - for i := 0; i < n; i++ { - if len, err = r.unpackDRecord(&drecord); err != nil { - return err - } - r.read += uint32(len) - } - return nil -} - -// Next moves onto the next directory record present in the image. -// It does not use the Path Table since the goal is to read everything -// from the ISO image. -func (r *Reader) Next() (os.FileInfo, error) { - if r.queue.IsEmpty() { - return nil, io.EOF - } - - // We only dequeue the directory when it does not contain more children - // or when it is empty and there is no children to iterate over. - item, err := r.queue.Peek() - if err != nil { - panic(err) - } - - f := item.(File) - if r.sector == 0 { - r.sector = f.ExtentLocationBE - _, err := r.image.Seek(int64(r.sector*sectorSize), os.SEEK_SET) - if err != nil { - return nil, ErrCorruptedImage(err) - } - - // Skips . and .. directories - if err = r.Skip(2); err != nil { - return nil, ErrCorruptedImage(err) - } - } - - var drecord File - var len byte - if (r.read % sectorSize) == 0 { - r.sector++ - _, err := r.image.Seek(int64(r.sector*sectorSize), os.SEEK_SET) - if err != nil { - return nil, ErrCorruptedImage(err) - } - } - - if len, err = r.unpackDRecord(&drecord); err != nil && err != io.EOF { - return nil, ErrCorruptedImage(err) - } - r.read += uint32(len) - - if err == io.EOF { - // directory record is empty, sector space wasted, move onto next sector. - rsize := (sectorSize - (r.read % sectorSize)) - buf := make([]byte, rsize) - if err := binary.Read(r.image, binary.BigEndian, buf); err != nil { - return nil, ErrCorruptedImage(err) - } - r.read += rsize - } - - // If there is no more entries in the current directory, dequeue it. - if r.read >= f.ExtentLengthBE { - r.read = 0 - r.sector = 0 - r.queue.Dequeue() - } - - // End of directory listing, drecord is empty so we don't bother - // to return it and keep iterating to look for the next actual - // directory or file. 
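// Illustrative sketch (not part of the vendored file): the recursion below is
// easiest to follow from the caller's side. Assuming a hypothetical image path
// and the io.EOF termination signal described above, a caller might walk the
// whole image like this (imports assumed: fmt, io, log, os, and this package):
//
//	img, err := os.Open("example.iso") // hypothetical path
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer img.Close()
//
//	r, err := iso9660.NewReader(img)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for {
//		fi, err := r.Next()
//		if err == io.EOF {
//			break // no more directory records in the image
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(fi.Name(), fi.Size(), fi.IsDir())
//	}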
- if drecord.fileID == "" { - return r.Next() - } - - parent := f.Name() - if parent == "\x00" { - parent = "/" - } - drecord.fileID = filepath.Join(parent, drecord.fileID) - - if drecord.IsDir() { - r.queue.Enqueue(drecord) - } else { - drecord.image = r.image - } - - return &drecord, nil -} - -// unpackDRecord unpacks directory record bits into Go's struct -func (r *Reader) unpackDRecord(f *File) (byte, error) { - // Gets the directory record length - var len byte - if err := binary.Read(r.image, binary.BigEndian, &len); err != nil { - return len, ErrCorruptedImage(err) - } - - if len == 0 { - return len + 1, io.EOF - } - - // Reads directory record into Go struct - var drecord DirectoryRecord - if err := binary.Read(r.image, binary.BigEndian, &drecord); err != nil { - return len, ErrCorruptedImage(err) - } - - f.DirectoryRecord = drecord - // Gets the name - name := make([]byte, drecord.FileIDLength) - if err := binary.Read(r.image, binary.BigEndian, name); err != nil { - return len, ErrCorruptedImage(err) - } - f.fileID = string(name) - - // Padding field as per section 9.1.12 in ECMA-119 - if (drecord.FileIDLength % 2) == 0 { - var zero byte - if err := binary.Read(r.image, binary.BigEndian, &zero); err != nil { - return len, ErrCorruptedImage(err) - } - } - - // System use field as per section 9.1.13 in ECMA-119 - // Directory record has 34 bytes in addition to the name's - // variable length and the padding field mentioned in section 9.1.12 - totalLen := 34 + drecord.FileIDLength - (drecord.FileIDLength % 2) - sysUseLen := int64(len - totalLen) - if sysUseLen > 0 { - sysData := make([]byte, sysUseLen) - if err := binary.Read(r.image, binary.BigEndian, sysData); err != nil { - return len, ErrCorruptedImage(err) - } - } - return len, nil -} - -// unpackPVD unpacks Primary Volume Descriptor in three phases. This is -// because the root directory record is a variable-length record and Go's binary -// package doesn't support unpacking variable-length structs easily. -func (r *Reader) unpackPVD() error { - // Unpack first half - var pvd1 PrimaryVolumePart1 - if err := binary.Read(r.image, binary.BigEndian, &pvd1); err != nil { - return ErrCorruptedImage(err) - } - r.pvd.PrimaryVolumePart1 = pvd1 - - // Unpack root directory record - var drecord File - if _, err := r.unpackDRecord(&drecord); err != nil { - return ErrCorruptedImage(err) - } - r.pvd.DirectoryRecord = drecord.DirectoryRecord - r.queue.Enqueue(drecord) - - // Unpack second half - var pvd2 PrimaryVolumePart2 - if err := binary.Read(r.image, binary.BigEndian, &pvd2); err != nil { - return ErrCorruptedImage(err) - } - r.pvd.PrimaryVolumePart2 = pvd2 - - return nil -} diff --git a/vendor/github.com/hooklift/iso9660/writer.go b/vendor/github.com/hooklift/iso9660/writer.go deleted file mode 100644 index f23a9f7f7f78..000000000000 --- a/vendor/github.com/hooklift/iso9660/writer.go +++ /dev/null @@ -1 +0,0 @@ -package iso9660 diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298da2..000000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7baf354..000000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. - -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] - - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index 44361e88bebd..000000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } else { - if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. 
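// Illustrative sketch (not from the vendored source): mapping a struct into a
// map with Map, following the rules documented above. Exported field names
// become keys with a lower-case first letter; the Config type and its values
// are hypothetical.
//
//	type Config struct {
//		Host string
//		Port int
//	}
//
//	dst := map[string]interface{}{}
//	if err := mergo.Map(&dst, Config{Host: "localhost", Port: 8080}); err != nil {
//		log.Fatal(err)
//	}
//	// dst now holds map[string]interface{}{"host": "localhost", "port": 8080}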
-func Map(dst, src interface{}) error { - var ( - vDst, vSrc reflect.Value - err error - ) - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 5d328b1fe774..000000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "reflect" -) - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - switch dst.Kind() { - case reflect.Struct: - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil { - return - } - } - case reflect.Map: - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } - if !dstElement.IsValid() { - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if src.IsNil() { - break - } else if dst.IsNil() { - if dst.CanSet() && isEmptyValue(dst) { - dst.Set(src) - } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil { - return - } - default: - if dst.CanSet() && !isEmptyValue(src) { - dst.Set(src) - } - } - return -} - -// Merge sets fields' values in dst from src if they have a zero -// value of their type. -// dst and src must be valid same-type structs and dst must be -// a pointer to struct. -// It won't merge unexported (private) fields and will do recursively -// any exported field. 
-func Merge(dst, src interface{}) error { - var ( - vDst, vSrc reflect.Value - err error - ) - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index f8a0991ec63e..000000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml deleted file mode 100644 index 62fdb61ec329..000000000000 --- a/vendor/github.com/imdario/mergo/testdata/license.yml +++ /dev/null @@ -1,3 +0,0 @@ -import: ../../../../fossene/db/schema/thing.yml -fields: - site: string diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a7bb..000000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4bab92..000000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. 
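// Illustrative sketch (not from the vendored source): a typical use of this
// check is to warn Windows users who launch a console binary by double-click,
// since the console window closes the moment the program exits. The message
// and delay below are arbitrary.
//
//	if mousetrap.StartedByExplorer() {
//		fmt.Println("This is a command-line tool; please run it from a terminal.")
//		time.Sleep(5 * time.Second) // keep the transient console window readable
//		os.Exit(1)
//	}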
-func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e3ec..000000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c30..000000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/intel-go/cpuid/LICENSE b/vendor/github.com/intel-go/cpuid/LICENSE deleted file mode 100644 index 24b67017ab4a..000000000000 --- a/vendor/github.com/intel-go/cpuid/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Intel Corporation -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/intel-go/cpuid/cpuid.go b/vendor/github.com/intel-go/cpuid/cpuid.go deleted file mode 100644 index b768f98163a9..000000000000 --- a/vendor/github.com/intel-go/cpuid/cpuid.go +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright 2015 Intel Corporation. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpuid provides access to the information available -// through the CPUID instruction. -// All information is gathered during package initialization phase -// so package's public interface doesn't call CPUID instruction. -package cpuid - -// VendorIdentificationString like "GenuineIntel" or "AuthenticAMD" -var VendorIdentificatorString string - -// ProcessorBrandString like "Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz" -var ProcessorBrandString string - -// SteppingId is Processor Stepping ID as described in -// Intel® 64 and IA-32 Architectures Software Developer’s Manual -var SteppingId uint32 - -// ProcessorType obtained from processor Version Information, according to -// Intel® 64 and IA-32 Architectures Software Developer’s Manual -var ProcessorType uint32 - -// DisplayFamily is Family of processors obtained from processor Version Information, according to -// Intel® 64 and IA-32 Architectures Software Developer’s Manual -var DisplayFamily uint32 - -// Display Model is Model of processor obtained from processor Version Information, according to -// Intel® 64 and IA-32 Architectures Software Developer’s Manual -var DisplayModel uint32 - -// Cache line size in bytes -var CacheLineSize uint32 - -// Maximum number of addressable IDs for logical processors in this physical package -var MaxLogocalCPUId uint32 - -// Initial APIC ID -var InitialAPICId uint32 - -// Cache descriptor's array -// You can iterate like there: -// for _, cacheDescription := range cpuid.CacheDescriptors { -// fmt.Printf("CacheDescriptor: %v\n", cacheDescription) -// } -// See CacheDescriptor type for more information -var CacheDescriptors []CacheDescriptor - -// Smallest monitor-line size in bytes (default is processor's monitor granularity) -var MonLineSizeMin uint32 - -// Largest monitor-line size in bytes (default is processor's monitor granularity) -var MonLineSizeMax uint32 - -// Enumeration of Monitor-Mwait extensions availability status -var MonitorEMX bool - -// Supports treating interrupts as break-event for MWAIT flag -var MonitorIBE bool - -// EnabledAVX flag allows to check if feature AVX is enabled by OS/BIOS -var EnabledAVX bool = false - -// EnabledAVX512 flag allows to check if features AVX512xxx are enabled by OS/BIOS -var EnabledAVX512 bool = false - -type CacheDescriptor struct { - Level int // Cache level - CacheType int // Cache type - CacheName string // Name - CacheSize int // in KBytes (of page size for TLB) - Ways int // Associativity, 0 undefined, 0xFF fully associate - LineSize int // Cache line size in bytes - Entries int // number of entries for TLB - Partioning int // partitioning -} - -// 
ThermalSensorInterruptThresholds is the number of interrupt thresholds in digital thermal sensor. -var ThermalSensorInterruptThresholds uint32 - -// HasFeature to check if features from FeatureNames map are available on the current processor -func HasFeature(feature uint64) bool { - return (featureFlags & feature) != 0 -} - -// HasExtendedFeature to check if features from ExtendedFeatureNames map are available on the current processor -func HasExtendedFeature(feature uint64) bool { - return (extendedFeatureFlags & feature) != 0 -} - -// HasExtraFeature to check if features from ExtraFeatureNames map are available on the current processor -func HasExtraFeature(feature uint64) bool { - return (extraFeatureFlags & feature) != 0 -} - -// HasThermalAndPowerFeature to check if features from ThermalAndPowerFeatureNames map are available on the current processor -func HasThermalAndPowerFeature(feature uint32) bool { - return (thermalAndPowerFeatureFlags & feature) != 0 -} - -var FeatureNames = map[uint64]string{ - SSE3: "SSE3", - PCLMULQDQ: "PCLMULQDQ", - DTES64: "DTES64", - MONITOR: "MONITOR", - DSI_CPL: "DSI_CPL", - VMX: "VMX", - SMX: "SMX", - EST: "EST", - TM2: "TM2", - SSSE3: "SSSE3", - CNXT_ID: "CNXT_ID", - SDBG: "SDBG", - FMA: "FMA", - CX16: "CX16", - XTPR: "XTPR", - PDCM: "PDCM", - PCID: "PCID", - DCA: "DCA", - SSE4_1: "SSE4_1", - SSE4_2: "SSE4_2", - X2APIC: "X2APIC", - MOVBE: "MOVBE", - POPCNT: "POPCNT", - TSC_DEADLINE: "TSC_DEADLINE", - AES: "AES", - XSAVE: "XSAVE", - OSXSAVE: "OSXSAVE", - AVX: "AVX", - F16C: "F16C", - RDRND: "RDRND", - HYPERVISOR: "HYPERVISOR", - FPU: "FPU", - VME: "VME", - DE: "DE", - PSE: "PSE", - TSC: "TSC", - MSR: "MSR", - PAE: "PAE", - MCE: "MCE", - CX8: "CX8", - APIC: "APIC", - SEP: "SEP", - MTRR: "MTRR", - PGE: "PGE", - MCA: "MCA", - CMOV: "CMOV", - PAT: "PAT", - PSE_36: "PSE_36", - PSN: "PSN", - CLFSH: "CLFSH", - DS: "DS", - ACPI: "ACPI", - MMX: "MMX", - FXSR: "FXSR", - SSE: "SSE", - SSE2: "SSE2", - SS: "SS", - HTT: "HTT", - TM: "TM", - IA64: "IA64", - PBE: "PBE", -} - -var ThermalAndPowerFeatureNames = map[uint32]string{ // From leaf06 - ARAT: "ARAT", - PLN: "PLN", - ECMD: "ECMD", - PTM: "PTM", - HDC: "HDC", - HCFC: "HCFC", - HWP: "HWP", - HWP_NOTIF: "HWP_NOTIF", - HWP_ACTIVITY_WINDOW: "HWP_ACTIVITY_WINDOW", - HWP_ENERGY_PERFORMANCE: "HWP_ENERGY_PERFORMANCE", - HWP_PACKAGE_LEVEL_REQUEST: "HWP_PACKAGE_LEVEL_REQUEST", - PERFORMANCE_ENERGY_BIAS: "PERFORMANCE_ENERGY_BIAS", - TEMPERATURE_SENSOR: "TEMPERATURE_SENSOR", - TURBO_BOOST: "TURBO_BOOST", - TURBO_BOOST_MAX: "TURBO_BOOST_MAX", -} - -var ExtendedFeatureNames = map[uint64]string{ // From leaf07 - FSGSBASE: "FSGSBASE", - IA32_TSC_ADJUST: "IA32_TSC_ADJUST", - BMI1: "BMI1", - HLE: "HLE", - AVX2: "AVX2", - SMEP: "SMEP", - BMI2: "BMI2", - ERMS: "ERMS", - INVPCID: "INVPCID", - RTM: "RTM", - PQM: "PQM", - DFPUCDS: "DFPUCDS", - MPX: "MPX", - PQE: "PQE", - AVX512F: "AVX512F", - AVX512DQ: "AVX512DQ", - RDSEED: "RDSEED", - ADX: "ADX", - SMAP: "SMAP", - AVX512IFMA: "AVX512IFMA", - PCOMMIT: "PCOMMIT", - CLFLUSHOPT: "CLFLUSHOPT", - CLWB: "CLWB", - INTEL_PROCESSOR_TRACE: "INTEL_PROCESSOR_TRACE", - AVX512PF: "AVX512PF", - AVX512ER: "AVX512ER", - AVX512CD: "AVX512CD", - SHA: "SHA", - AVX512BW: "AVX512BW", - AVX512VL: "AVX512VL", - PREFETCHWT1: "PREFETCHWT1", - AVX512VBMI: "AVX512VBMI", -} - -var ExtraFeatureNames = map[uint64]string{ // From leaf 8000 0001 - LAHF_LM: "LAHF_LM", - CMP_LEGACY: "CMP_LEGACY", - SVM: "SVM", - EXTAPIC: "EXTAPIC", - CR8_LEGACY: "CR8_LEGACY", - ABM: "ABM", - SSE4A: "SSE4A", - MISALIGNSSE: "MISALIGNSSE", 
- PREFETCHW: "PREFETCHW", - OSVW: "OSVW", - IBS: "IBS", - XOP: "XOP", - SKINIT: "SKINIT", - WDT: "WDT", - LWP: "LWP", - FMA4: "FMA4", - TCE: "TCE", - NODEID_MSR: "NODEID_MSR", - TBM: "TBM", - TOPOEXT: "TOPOEXT", - PERFCTR_CORE: "PERFCTR_CORE", - PERFCTR_NB: "PERFCTR_NB", - SPM: "SPM", - DBX: "DBX", - PERFTSC: "PERFTSC", - PCX_L2I: "PCX_L2I", - FPU_2: "FPU", - VME_2: "VME", - DE_2: "DE", - PSE_2: "PSE", - TSC_2: "TSC", - MSR_2: "MSR", - PAE_2: "PAE", - MCE_2: "MCE", - CX8_2: "CX8", - APIC_2: "APIC", - SYSCALL: "SYSCALL", - MTRR_2: "MTRR", - PGE_2: "PGE", - MCA_2: "MCA", - CMOV_2: "CMOV", - PAT_2: "PAT", - PSE36: "PSE36", - MP: "MP", - NX: "NX", - MMXEXT: "MMXEXT", - MMX_2: "MMX", - FXSR_2: "FXSR", - FXSR_OPT: "FXSR_OPT", - PDPE1GB: "PDPE1GB", - RDTSCP: "RDTSCP", - LM: "LM", - _3DNOWEXT: "3DNOWEXT", - _3DNOW: "3DNOW", -} - -var brandStrings = map[string]int{ - "AMDisbetter!": AMD, - "AuthenticAMD": AMD, - "CentaurHauls": CENTAUR, - "CyrixInstead": CYRIX, - "GenuineIntel": INTEL, - "TransmetaCPU": TRANSMETA, - "GenuineTMx86": TRANSMETA, - "Geode by NSC": NATIONALSEMICONDUCTOR, - "NexGenDriven": NEXGEN, - "RiseRiseRise": RISE, - "SiS SiS SiS ": SIS, - "UMC UMC UMC ": UMC, - "VIA VIA VIA ": VIA, - "Vortex86 SoC": VORTEX, - "KVMKVMKVM": KVM, - "Microsoft Hv": HYPERV, - "VMwareVMware": VMWARE, - "XenVMMXenVMM": XEN, -} - -var maxInputValue uint32 -var maxExtendedInputValue uint32 -var extendedModelId uint32 -var extendedFamilyId uint32 -var brandIndex uint32 -var brandId int -var featureFlags uint64 -var thermalAndPowerFeatureFlags uint32 -var extendedFeatureFlags uint64 -var extraFeatureFlags uint64 - -const ( - UKNOWN = iota - AMD - CENTAUR - CYRIX - INTEL - TRANSMETA - NATIONALSEMICONDUCTOR - NEXGEN - RISE - SIS - UMC - VIA - VORTEX - KVM - HYPERV - VMWARE - XEN -) - -const ( - SSE3 = uint64(1) << iota - PCLMULQDQ - DTES64 - MONITOR - DSI_CPL - VMX - SMX - EST - TM2 - SSSE3 - CNXT_ID - SDBG - FMA - CX16 - XTPR - PDCM - _ - PCID - DCA - SSE4_1 - SSE4_2 - X2APIC - MOVBE - POPCNT - TSC_DEADLINE - AES - XSAVE - OSXSAVE - AVX - F16C - RDRND - HYPERVISOR - FPU - VME - DE - PSE - TSC - MSR - PAE - MCE - CX8 - APIC - _ - SEP - MTRR - PGE - MCA - CMOV - PAT - PSE_36 - PSN - CLFSH - _ - DS - ACPI - MMX - FXSR - SSE - SSE2 - SS - HTT - TM - IA64 - PBE -) - -const ( - FSGSBASE = uint64(1) << iota - IA32_TSC_ADJUST - _ - BMI1 - HLE - AVX2 - _ - SMEP - BMI2 - ERMS - INVPCID - RTM - PQM - DFPUCDS - MPX - PQE - AVX512F - AVX512DQ - RDSEED - ADX - SMAP - AVX512IFMA - PCOMMIT - CLFLUSHOPT - CLWB - INTEL_PROCESSOR_TRACE - AVX512PF - AVX512ER - AVX512CD - SHA - AVX512BW - AVX512VL - // ECX's const from there - PREFETCHWT1 - AVX512VBMI -) - -const ( - LAHF_LM = uint64(1) << iota - CMP_LEGACY - SVM - EXTAPIC - CR8_LEGACY - ABM - SSE4A - MISALIGNSSE - PREFETCHW - OSVW - IBS - XOP - SKINIT - WDT - _ - LWP - FMA4 - TCE - _ - NODEID_MSR - _ - TBM - TOPOEXT - PERFCTR_CORE - PERFCTR_NB - SPM - DBX - PERFTSC - PCX_L2I - _ - _ - _ - // EDX features from there - FPU_2 - VME_2 - DE_2 - PSE_2 - TSC_2 - MSR_2 - PAE_2 - MCE_2 - CX8_2 - APIC_2 - _ - SYSCALL - MTRR_2 - PGE_2 - MCA_2 - CMOV_2 - PAT_2 - PSE36 - _ - MP - NX - _ - MMXEXT - MMX_2 - FXSR_2 - FXSR_OPT - PDPE1GB - RDTSCP - _ - LM - _3DNOWEXT - _3DNOW -) - -// Thermal and Power Management features -const ( - // EAX bits 0-15 - TEMPERATURE_SENSOR = uint32(1) << iota // Digital temperature sensor - TURBO_BOOST // Intel Turbo Boost Technology available - ARAT // APIC-Timer-always-running feature is supported if set. 
- _ // Reserved - PLN // Power limit notification controls - ECMD // Clock modulation duty cycle extension - PTM // Package thermal management - HWP // HWP base registers (IA32_PM_ENABLE[bit 0], IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) - HWP_NOTIF // IA32_HWP_INTERRUPT MSR - HWP_ACTIVITY_WINDOW // IA32_HWP_REQUEST[bits 41:32] - HWP_ENERGY_PERFORMANCE // IA32_HWP_REQUEST[bits 31:24] - HWP_PACKAGE_LEVEL_REQUEST // IA32_HWP_REQUEST_PKG MSR - _ // Reserved (eax bit 12) - HDC // HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1, IA32_THREAD_STALL MSRs - TURBO_BOOST_MAX // Intel® Turbo Boost Max Technology - _ // Reserved (eax bit 15) - - // ECX bits 0-15 - HCFC // Hardware Coordination Feedback Capability - _ - _ - PERFORMANCE_ENERGY_BIAS // Processor supports performance-energy bias preference -) - -const ( - NULL = iota - DATA_CACHE - INSTRUCTION_CACHE - UNIFIED_CACHE - TLB - DTLB - STLB - PREFETCH -) diff --git a/vendor/github.com/intel-go/cpuid/cpuid_amd64.go b/vendor/github.com/intel-go/cpuid/cpuid_amd64.go deleted file mode 100644 index 49f2716993c5..000000000000 --- a/vendor/github.com/intel-go/cpuid/cpuid_amd64.go +++ /dev/null @@ -1,595 +0,0 @@ -// +build amd64 -// Copyright 2015 Intel Corporation. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpuid provides access to the information available -// through the CPUID instruction. -// All information is gathered during package initialization phase -// so package's public interface doesn't call CPUID instruction. -package cpuid - -func cpuid_low(arg1, arg2 uint32) (eax, ebx, ecx, edx uint32) // implemented in cpuidlow_amd64.s -func xgetbv_low(arg1 uint32) (eax, edx uint32) // implemented in cpuidlow_amd64.s - -func init() { - detectFeatures() -} - -func detectFeatures() { - leaf0() - leaf1() - leaf2() - leaf3() - leaf4() - leaf5() - leaf6() - leaf7() - leaf0x80000000() - leaf0x80000001() - leaf0x80000004() - leaf0x80000005() - leaf0x80000006() - - if HasFeature(OSXSAVE) { - eax, _ := xgetbv_low(0) - if (eax & 0x6) == 0x6 { - EnabledAVX = true - } - if (eax & 0xE0) == 0xE0 { - EnabledAVX512 = true - } - } -} - -var leaf02Names = [...]string{ - "NULL", - "DATA_CACHE", - "INSTRUCTION_CACHE", - "UNIFIED_CACHE", - "TLB", - "DTLB", - "STLB", - "PREFETCH", -} - -func leaf0() { - - eax, ebx, ecx, edx := cpuid_low(0, 0) - - maxInputValue = eax - - VendorIdentificatorString = string(int32sToBytes(ebx, edx, ecx)) - brandId = brandStrings[VendorIdentificatorString] -} - -func leaf1() { - - if maxInputValue < 1 { - return - } - - eax, ebx, ecx, edx := cpuid_low(1, 0) - // Parse EAX - SteppingId = (eax & 0xF) - modelId := (eax >> 4) & 0xF - familyId := (eax >> 8) & 0xF - ProcessorType = (eax >> 12) & 0x3 - ExtendedModelId := (eax >> 16) & 0xF - extendedFamilyId := (eax >> 20) & 0xFF - - DisplayFamily = familyId - DisplayModel = modelId - - if familyId == 0xF { - DisplayFamily = extendedFamilyId + familyId - } - - if familyId == 0x6 || familyId == 0xF { - DisplayModel = ExtendedModelId<<4 + modelId - } - - // Parse EBX - brandIndex = ebx & 0xFF - CacheLineSize = ((ebx >> 8) & 0xFF) << 3 - MaxLogocalCPUId = (ebx >> 16) & 0xFF - InitialAPICId = (ebx >> 24) - - // Parse ECX & EDX not needed. 
Ask through HasFeature function - featureFlags = (uint64(edx) << 32) | uint64(ecx) -} - -func leaf2() { - - if brandId != INTEL { - return - } - if maxInputValue < 2 { - return - } - - bytes := int32sToBytes(cpuid_low(2, 0)) - - for i := 0; i < len(bytes); i++ { - if (i%4 == 0) && (bytes[i+3]&(1<<7) != 0) { - i += 4 - continue - } - if bytes[i] == 0xFF { // it means that we should use leaf 4 for cache info - CacheDescriptors = CacheDescriptors[0:0] - break - } - CacheDescriptors = append(CacheDescriptors, leaf02Descriptors[int16(bytes[i])]) - } -} - -func leaf3() { - if brandId != INTEL { - return - } - - if maxInputValue < 3 { - return - } - // TODO SerialNumber for < Pentium 4 -} - -func leaf4() { - - if brandId != INTEL { - return - } - - if maxInputValue < 4 { - return - } - - cacheId := 0 - for { - eax, ebx, ecx, _ := cpuid_low(4, uint32(cacheId)) - cacheId++ - cacheType := eax & 0xF - - if cacheType == NULL { - break - } - - cacheLevel := (eax >> 5) & 0x7 - // selfInitializingCacheLevel := eax & (1<<8) - // fullyAssociativeCache := eax & (1<<9) - // maxNumLogicalCoresSharing := (eax >> 14) & 0x3FF - // maxNumPhisCores := (eax >> 26) & 0x3F - systemCoherencyLineSize := (ebx & 0xFFF) + 1 - physicalLinePartions := (ebx>>12)&0x3FF + 1 - waysOfAssiociativity := (ebx>>22)&0x3FF + 1 - numberOfSets := ecx + 1 - // writeBackInvalidate := edx & 1 - // cacheInclusiveness := edx & (1<<1) - // complexCacheIndexing := edx & (1<<2) - cacheSize := (waysOfAssiociativity * physicalLinePartions * - systemCoherencyLineSize * numberOfSets) >> 10 - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{int(cacheLevel), - int(cacheType), - "", - int(cacheSize), - int(waysOfAssiociativity), - int(systemCoherencyLineSize), - int(numberOfSets), - int(physicalLinePartions), - }) - } -} - -func leaf5() { - if maxInputValue < 5 { - return - } - - eax, ebx, ecx, _ := cpuid_low(4, 0) // TODO process EDX with C0-C7 C-states - MonLineSizeMax = eax & (0xFFFF) - MonLineSizeMax = ebx & (0xFFFF) - MonitorEMX = (ecx & (1 << 0)) != 0 - MonitorIBE = (ecx & (1 << 1)) != 0 - -} - -func leaf6() { - // Thermal and Power Management Features for Intel - if maxInputValue < 6 { - return - } - - eax, ebx, ecx, _ := cpuid_low(6, 0) - thermalAndPowerFeatureFlags = (eax & 0xFFFF) | (ecx << 16) - ThermalSensorInterruptThresholds = ebx & 7 -} - -func leaf7() { - _, ebx, ecx, _ := cpuid_low(7, 0) - extendedFeatureFlags = (uint64(ecx) << 32) | uint64(ebx) -} - -func leaf0x80000000() { - maxExtendedInputValue, _, _, _ = cpuid_low(0x80000000, 0) -} - -func leaf0x80000001() { - if maxExtendedInputValue < 0x80000001 { - return - } - _, _, ecx, edx := cpuid_low(0x80000001, 0) - //extendedProcessorSignatureAndFeatureBits := eax - extraFeatureFlags = (uint64(edx) << 32) | uint64(ecx) -} - -// leaf0x80000004 looks at the Processor Brand String in leaves 0x80000002 through 0x80000004 -func leaf0x80000004() { - if maxExtendedInputValue < 0x80000004 { - return - } - - ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000002, 0))) - ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000003, 0))) - ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000004, 0))) -} - -func leaf0x80000005() { - // AMD L1 Cache and TLB Information - if maxExtendedInputValue < 0x80000005 { - return - } - - if brandId != AMD { - return - } - - eax, ebx, ecx, edx := cpuid_low(0x80000005, 0) - - L1DTlb2and4MAssoc := (eax >> 24) & 0xFF - L1DTlb2and4MSize := (eax >> 16) & 0xFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, 
- DTLB, - "DTLB 2M/4M", - 2 * 1024, - int(L1DTlb2and4MAssoc), - -1, - int(L1DTlb2and4MSize), - 0, - }) - - L1ITlb2and4MAssoc := (eax >> 8) & 0xFF - L1ITlb2and4MSize := (eax) & 0xFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, - TLB, - "ITLB 2M/4M", - 2 * 1024, - int(L1ITlb2and4MAssoc), - -1, - int(L1ITlb2and4MSize), - 0, - }) - - L1DTlb4KAssoc := (ebx >> 24) & 0xFF - L1DTlb4KSize := (ebx >> 16) & 0xFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, - DTLB, - "DTLB 4K", - 4, - int(L1DTlb4KAssoc), - -1, - int(L1DTlb4KSize), - 0, - }) - - L1ITlb4KAssoc := (ebx >> 8) & 0xFF - L1ITlb4KSize := (ebx) & 0xFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, - TLB, - "ITLB 4K", - 4, - int(L1ITlb4KAssoc), - -1, - int(L1ITlb4KSize), - 0, - }) - - L1DcSize := (ecx >> 24) & 0xFF - L1DcAssoc := (ecx >> 16) & 0xFF - L1DcLinesPerTag := (ecx >> 8) & 0xFF - L1DcLineSize := (ecx >> 0) & 0xFF - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, - DATA_CACHE, - "L1 Data cache", - int(L1DcSize), - int(L1DcAssoc), - int(L1DcLineSize), - -1, - int(L1DcLinesPerTag), - }) - - L1IcSize := (edx >> 24) & 0xFF - L1IcAssoc := (edx >> 16) & 0xFF - L1IcLinesPerTag := (edx >> 8) & 0xFF - L1IcLineSize := (edx >> 0) & 0xFF - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{1, - INSTRUCTION_CACHE, - "L1 Instruction cache", - int(L1IcSize), - int(L1IcAssoc), - int(L1IcLineSize), - -1, - int(L1IcLinesPerTag), - }) -} - -func leaf0x80000006() { - - if maxExtendedInputValue < 0x80000006 { - return - } - - var associativityEncodings = map[uint]uint{ - 0x00: 0, - 0x01: 1, - 0x02: 2, - 0x04: 4, - 0x06: 8, - 0x08: 16, - 0x0A: 32, - 0x0B: 48, - 0x0C: 64, - 0x0D: 96, - 0x0E: 128, - 0x0F: 0xFF, // - Fully associative - } - - eax, ebx, ecx, edx := cpuid_low(0x80000006, 0) - - if brandId == INTEL { - - CacheLineSize := (ecx >> 0) & 0xFF - L2Associativity := uint((ecx >> 12) & 0xF) - CacheSize := (ecx >> 16) & 0xFFFF - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - 0, - "Cache info from leaf 0x80000006 for Intel", - int(CacheSize), - int(associativityEncodings[L2Associativity]), - int(CacheLineSize), - -1, - 0, - }) - } - - if brandId == AMD { - - L2DTlb2and4MAssoc := uint((eax >> 28) & 0xF) - L2DTlb2and4MSize := (eax >> 16) & 0xFFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - DTLB, - "DTLB 2M/4M", - 2 * 1024, - int(associativityEncodings[L2DTlb2and4MAssoc]), - -1, - int(L2DTlb2and4MSize), - 0, - }) - - L2ITlb2and4MAssoc := uint((eax >> 12) & 0xF) - L2ITlb2and4MSize := (eax) & 0xFFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - TLB, - "ITLB 2M/4M", - 2 * 1024, - int(associativityEncodings[L2ITlb2and4MAssoc]), - -1, - int(L2ITlb2and4MSize), - 0, - }) - - L2DTlb4KAssoc := uint((ebx >> 28) & 0xF) - L2DTlb4KSize := (ebx >> 16) & 0xFFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - DTLB, - "DTLB 4K", - 4, - int(associativityEncodings[L2DTlb4KAssoc]), - -1, - int(L2DTlb4KSize), - 0, - }) - - L2ITlb4KAssoc := uint((ebx >> 12) & 0xF) - L2ITlb4KSize := (ebx) & 0xFFF - - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - TLB, - "ITLB 4K", - 4, - int(associativityEncodings[L2ITlb4KAssoc]), - -1, - int(L2ITlb4KSize), - 0, - }) - - L2Size := (ecx >> 16) & 0xFFFF - L2Assoc := uint((ecx >> 12) & 0xF) - L2LinesPerTag := (ecx >> 8) & 0xF - L2LineSize := (ecx >> 0) & 0xFF - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{2, - DATA_CACHE, - "L2 
Data cache", - int(L2Size), - int(associativityEncodings[L2Assoc]), - int(L2LineSize), - -1, - int(L2LinesPerTag), - }) - - L3Size := ((edx >> 18) & 0xF) * 512 - L3Assoc := uint((edx >> 12) & 0xF) - L3LinesPerTag := (edx >> 8) & 0xF - L3LineSize := (edx >> 0) & 0xFF - CacheDescriptors = append(CacheDescriptors, - CacheDescriptor{3, - DATA_CACHE, - "L3 Data cache", - int(L3Size), - int(associativityEncodings[L3Assoc]), - int(L3LineSize), - -1, - int(L3LinesPerTag), - }) - } -} - -// TODO split fused descritops with bits in high key's byte like for 0x49 -var leaf02Descriptors = map[int16]CacheDescriptor{ - 0x01: {-1, TLB, "Instruction TLB", 4, 4, -1, 32, 0}, - 0x02: {-1, TLB, "Instruction TLB", 4 * 1024, 0xFF, -1, 2, 0}, - 0x03: {-1, TLB, "Data TLB", 4, 4, -1, 64, 0}, - 0x04: {-1, TLB, "Data TLB", 4 * 1024, 4, -1, 8, 0}, - 0x05: {-1, TLB, "Data TLB1", 4 * 1024, 4, -1, 32, 0}, - 0x06: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 8, 4, 32, -1, 0}, - 0x08: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 16, 4, 32, -1, 0}, - 0x09: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 32, 4, 64, -1, 0}, - 0x0A: {1, DATA_CACHE, "1st-level data cache", 8, 2, 32, -1, 0}, - 0x0B: {-1, TLB, "Instruction TLB", 4 * 1024, 4, -1, 4, 0}, - 0x0C: {1, DATA_CACHE, "1st-level data cache", 16, 4, 32, -1, 0}, - 0x0D: {1, DATA_CACHE, "1st-level data cache", 16, 4, 64, -1, 0}, - 0x0E: {1, DATA_CACHE, "1st-level data cache", 24, 6, 64, -1, 0}, - 0x1D: {2, DATA_CACHE, "2nd-level cache", 128, 2, 64, -1, 0}, - 0x21: {2, DATA_CACHE, "2nd-level cache", 256, 8, 64, -1, 0}, - 0x22: {3, DATA_CACHE, "3nd-level cache", 512, 4, 64, -1, 2}, - 0x23: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 8, 64, -1, 2}, - 0x24: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 16, 64, -1, 0}, - 0x25: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 8, 64, -1, 2}, - 0x29: {3, DATA_CACHE, "2nd-level cache", 4 * 1024, 8, 64, -1, 2}, - 0x2C: {1, DATA_CACHE, "1st-level cache", 32, 8, 64, -1, 0}, - 0x30: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 32, 8, 64, -1, 0}, - 0x40: {-1, DATA_CACHE, "No 2nd-level cache or, if processor contains a " + - "valid 2nd-level cache, no 3rd-level cache", -1, -1, -1, -1, 0}, - 0x41: {2, DATA_CACHE, "2nd-level cache", 128, 4, 32, -1, 0}, - 0x42: {2, DATA_CACHE, "2nd-level cache", 256, 4, 32, -1, 0}, - 0x43: {2, DATA_CACHE, "2nd-level cache", 512, 4, 32, -1, 0}, - 0x44: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 4, 32, -1, 0}, - 0x45: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 4, 32, -1, 0}, - 0x46: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 4, 64, -1, 0}, - 0x47: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 8, 64, -1, 0}, - 0x48: {2, DATA_CACHE, "2nd-level cache", 3 * 1024, 12, 64, -1, 0}, - 0x49: {2, DATA_CACHE, "2nd-level cache", 4 * 1024, 16, 64, -1, 0}, - // (Intel Xeon processor MP, Family 0FH, Model 06H) - (0x49 | (1 << 8)): {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 16, 64, -1, 0}, - 0x4A: {3, DATA_CACHE, "3nd-level cache", 6 * 1024, 12, 64, -1, 0}, - 0x4B: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 16, 64, -1, 0}, - 0x4C: {3, DATA_CACHE, "3nd-level cache", 12 * 1024, 12, 64, -1, 0}, - 0x4D: {3, DATA_CACHE, "3nd-level cache", 16 * 1024, 16, 64, -1, 0}, - 0x4E: {2, DATA_CACHE, "3nd-level cache", 6 * 1024, 24, 64, -1, 0}, - 0x4F: {-1, TLB, "Instruction TLB", 4, -1, -1, 32, 0}, - 0x50: {-1, TLB, "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 64, 0}, - 0x51: {-1, TLB, "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 128, 0}, - 0x52: {-1, TLB, 
"Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 256, 0}, - 0x55: {-1, TLB, "Instruction TLB: 2-MByte or 4-MByte pages", 2 * 1024, 0xFF, -1, 7, 0}, - 0x56: {-1, TLB, "Data TLB0", 4 * 1024, 4, -1, 16, 0}, - 0x57: {-1, TLB, "Data TLB0", 4, 4, -1, 16, 0}, - 0x59: {-1, TLB, "Data TLB0", 4, 0xFF, -1, 16, 0}, - 0x5A: {-1, TLB, "Data TLB0 2-MByte or 4 MByte pages", 2 * 1024, 4, -1, 32, 0}, - 0x5B: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 64, 0}, - 0x5C: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 128, 0}, - 0x5D: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 256, 0}, - 0x60: {1, DATA_CACHE, "1st-level data cache", 16, 8, 64, -1, 0}, - 0x61: {-1, TLB, "Instruction TLB", 4, 0xFF, -1, 48, 0}, - 0x63: {-1, TLB, "Data TLB", 1 * 1024 * 1024, 4, -1, 4, 0}, - 0x66: {1, DATA_CACHE, "1st-level data cache", 8, 4, 64, -1, 0}, - 0x67: {1, DATA_CACHE, "1st-level data cache", 16, 4, 64, -1, 0}, - 0x68: {1, DATA_CACHE, "1st-level data cache", 32, 4, 64, -1, 0}, - 0x70: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 12, 8, -1, -1, 0}, - 0x71: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 16, 8, -1, -1, 0}, - 0x72: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 32, 8, -1, -1, 0}, - 0x76: {-1, TLB, "Instruction TLB: 2M/4M pages", 2 * 1024, 0xFF, -1, 8, 0}, - 0x78: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 4, 64, -1, 0}, - 0x79: {2, DATA_CACHE, "2nd-level cache", 128, 8, 64, -1, 2}, - 0x7A: {2, DATA_CACHE, "2nd-level cache", 256, 8, 64, -1, 2}, - 0x7B: {2, DATA_CACHE, "2nd-level cache", 512, 8, 64, -1, 2}, - 0x7C: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 64, -1, 2}, - 0x7D: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 8, 64, -1, 0}, - 0x7F: {2, DATA_CACHE, "2nd-level cache", 512, 2, 64, -1, 0}, - 0x80: {2, DATA_CACHE, "2nd-level cache", 512, 8, 64, -1, 0}, - 0x82: {2, DATA_CACHE, "2nd-level cache", 256, 8, 32, -1, 0}, - 0x83: {2, DATA_CACHE, "2nd-level cache", 512, 8, 32, -1, 0}, - 0x84: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 32, -1, 0}, - 0x85: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 8, 32, -1, 0}, - 0x86: {2, DATA_CACHE, "2nd-level cache", 512, 4, 32, -1, 0}, - 0x87: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 64, -1, 0}, - 0xA0: {-1, DTLB, "DTLB", 4, 0xFF, -1, 32, 0}, - 0xB0: {-1, TLB, "Instruction TLB", 4, 4, -1, 128, 0}, - 0xB1: {-1, TLB, "Instruction TLB 2M pages 4 way 8 entries or" + - "4M pages 4-way, 4 entries", 2 * 1024, 4, -1, 8, 0}, - 0xB2: {-1, TLB, "Instruction TLB", 4, 4, -1, 64, 0}, - 0xB3: {-1, TLB, "Data TLB", 4, 4, -1, 128, 0}, - 0xB4: {-1, TLB, "Data TLB1", 4, 4, -1, 256, 0}, - 0xB5: {-1, TLB, "Instruction TLB", 4, 8, -1, 64, 0}, - 0xB6: {-1, TLB, "Instruction TLB", 4, 8, -1, 128, 0}, - 0xBA: {-1, TLB, "Data TLB1", 4, 4, -1, 64, 0}, - 0xC0: {-1, TLB, "Data TLB: 4 KByte and 4 MByte pages", 4, 4, -1, 8, 0}, - 0xC1: {-1, STLB, "Shared 2nd-Level TLB: 4Kbyte and 2Mbyte pages", 4, 8, -1, 1024, 0}, - 0xC2: {-1, DTLB, "DTLB 4KByte/2 MByte pages", 4, 4, -1, 16, 0}, - 0xC3: {-1, STLB, "Shared 2nd-Level TLB: " + - "4 KByte /2 MByte pages, 6-way associative, 1536 entries." 
+ - "Also 1GBbyte pages, 4-way,16 entries.", 4, 6, -1, 1536, 0}, - 0xCA: {-1, STLB, "Shared 2nd-Level TLB", 4, 4, -1, 512, 0}, - 0xD0: {3, DATA_CACHE, "3nd-level cache", 512, 4, 64, -1, 0}, - 0xD1: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 4, 64, -1, 0}, - 0xD2: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 4, 64, -1, 0}, - 0xD6: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 8, 64, -1, 0}, - 0xD7: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 8, 64, -1, 0}, - 0xD8: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 8, 64, -1, 0}, - 0xDC: {3, DATA_CACHE, "3nd-level cache", 1 * 1536, 12, 64, -1, 0}, - 0xDD: {3, DATA_CACHE, "3nd-level cache", 3 * 1024, 12, 64, -1, 0}, - 0xDE: {3, DATA_CACHE, "3nd-level cache", 6 * 1024, 12, 64, -1, 0}, - 0xE2: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 16, 64, -1, 0}, - 0xE3: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 16, 64, -1, 0}, - 0xE4: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 16, 64, -1, 0}, - 0xEA: {3, DATA_CACHE, "3nd-level cache", 12 * 1024, 24, 64, -1, 0}, - 0xEB: {3, DATA_CACHE, "3nd-level cache", 18 * 1024, 24, 64, -1, 0}, - 0xEC: {3, DATA_CACHE, "3nd-level cache", 24 * 1024, 24, 64, -1, 0}, - 0xF0: {-1, PREFETCH, "", 64, -1, -1, -1, 0}, - 0xF1: {-1, PREFETCH, "", 128, -1, -1, -1, 0}, - 0xFF: {-1, NULL, "CPUID leaf 2 does not report cache descriptor " + - "information, use CPUID leaf 4 to query cache parameters", - -1, -1, -1, -1, 0}, -} - -func int32sToBytes(args ...uint32) []byte { - var result []byte - - for _, arg := range args { - result = append(result, - byte((arg)&0xFF), - byte((arg>>8)&0xFF), - byte((arg>>16)&0xFF), - byte((arg>>24)&0xFF)) - } - - return result -} diff --git a/vendor/github.com/intel-go/cpuid/cpuidlow_amd64.s b/vendor/github.com/intel-go/cpuid/cpuidlow_amd64.s deleted file mode 100644 index 20da845a6a63..000000000000 --- a/vendor/github.com/intel-go/cpuid/cpuidlow_amd64.s +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2017 Intel Corporation. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -#include "textflag.h" - -// func cpuid_low(arg1, arg2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid_low(SB),NOSPLIT,$0-24 - MOVL arg1+0(FP), AX - MOVL arg2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET -// func xgetbv_low(arg1 uint32) (eax, edx uint32) - TEXT ·xgetbv_low(SB),NOSPLIT,$0-16 - MOVL arg1+0(FP), CX - BYTE $0x0F - BYTE $0x01 - BYTE $0xD0 - MOVL AX,eax+8(FP) - MOVL DX,edx+12(FP) - RET diff --git a/vendor/github.com/jimmidyson/go-download/LICENSE b/vendor/github.com/jimmidyson/go-download/LICENSE deleted file mode 100644 index f3a588aad965..000000000000 --- a/vendor/github.com/jimmidyson/go-download/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (C) 2016 Red Hat, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/jimmidyson/go-download/checksum_validator.go b/vendor/github.com/jimmidyson/go-download/checksum_validator.go deleted file mode 100644 index f21d15fe98ec..000000000000 --- a/vendor/github.com/jimmidyson/go-download/checksum_validator.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package download - -import ( - "bufio" - "bytes" - "encoding/hex" - "hash" - "io" - "net/http" - "net/url" - "os" - "strings" - - "github.com/pkg/errors" -) - -type checksumValidator interface { - io.Writer - validate() bool -} - -func newValidator(hasher hash.Hash, client *http.Client, checksum, filename string) (checksumValidator, error) { - if u, err := url.Parse(checksum); err == nil && len(u.Scheme) != 0 { - if u.Scheme == "http" || u.Scheme == "https" { - return newValidatorFromChecksumURL(hasher, client, checksum, filename) - } - - return nil, errors.Errorf("unsupported scheme: %s (supported schemes: %v)", u.Scheme, []string{"http", "https"}) - } - - if _, err := hex.DecodeString(checksum); err == nil { - return &validator{ - hasher: hasher, - checksum: checksum, - }, nil - } - - if f, err := os.Open(checksum); err == nil { - defer func() { _ = f.Close() }() // #nosec - return newValidatorFromReader(hasher, f, filename) - } - - return nil, errors.New("invalid checksum: must be one of hex encoded checksum, URL or file path") -} - -func newValidatorFromChecksumURL(hasher hash.Hash, client *http.Client, checksumURL, filename string) (checksumValidator, error) { - resp, err := client.Get(checksumURL) - if err != nil { - return nil, errors.Wrap(err, "failed to download checksum file") - } - defer func() { _ = resp.Body.Close() }() // #nosec - - if resp.StatusCode != http.StatusOK { - return nil, errors.Errorf("failed to download checksum file: received status code %d", resp.StatusCode) - } - - return newValidatorFromReader(hasher, resp.Body, filename) -} - -func newValidatorFromReader(hasher hash.Hash, reader io.Reader, filename string) (checksumValidator, error) { - scanner := bufio.NewScanner(reader) - var b bytes.Buffer - for scanner.Scan() { - line := scanner.Text() - spl := strings.Fields(line) - if len(spl) == 2 { - if spl[1] == filename { - trimmedHash := strings.TrimSpace(spl[0]) - if _, err := hex.DecodeString(trimmedHash); err == nil { - return &validator{ - hasher: hasher, - checksum: trimmedHash, - }, nil - } - } - } - if b.Len() == 0 { - _, _ = b.WriteString(line) // #nosec - } - } - buf := b.String() - if len(buf) > 0 { - trimmedHash := strings.TrimSpace(buf) - if _, err := hex.DecodeString(trimmedHash); err == nil { - return &validator{ - hasher: hasher, - checksum: trimmedHash, - }, nil - } - } - - return nil, errors.New("failed to retrieve checksum") -} - -var _ checksumValidator = &validator{} - -type validator struct { - hasher hash.Hash - checksum string -} - -func (v *validator) validate() bool { - return hex.EncodeToString(v.hasher.Sum(nil)) == v.checksum -} - -func (v *validator) 
Write(p []byte) (n int, err error) { - return v.hasher.Write(p) -} diff --git a/vendor/github.com/jimmidyson/go-download/download.go b/vendor/github.com/jimmidyson/go-download/download.go deleted file mode 100644 index 272c44cac677..000000000000 --- a/vendor/github.com/jimmidyson/go-download/download.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2016 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package download - -import ( - "crypto" - "crypto/md5" // #nosec - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "time" - - "github.com/pkg/errors" - pb "gopkg.in/cheggaaa/pb.v1" -) - -// Options holds the possible configuration options for the Downloader. -type Options struct { - // HTTPClient is an optional client to perform downloads with. If nil, `http.DefaultClient` - // will be used. - HTTPClient *http.Client - // Checksum is either a checksum string, or a URL or path to a file containing the checksum. The file - // can either contain the checksum only or contain multiple lines of the format: - // CHECKSUM FILENAME - Checksum string - // Checksum hash is the hash for the checksum. Currently only supports SHA1, SHA256, SHA512 and MD5. - // If unspecified, defaults to SHA256. - ChecksumHash crypto.Hash - // ProgressBars is the configuration of progress bars output. Set to `nil` (default) to disable. - ProgressBars *ProgressBarOptions - // Retries is the number of retries for retriable errors. Defaults to 5 if unset. Set to -1 for - // infinite retries. - Retries int - // RetryInterval is the interval between retries. - RetryInterval time.Duration -} - -// FileOptions holds the possible configuration options to download to a file. -type FileOptions struct { - // Options is the common set of downloader options. - Options - // Mkdirs is the option to create parent directories of target directory if they don't - // exist. Use `download.MkdirAll` or `download.MkdirNone` (or any `*bool`). Defaults to - // `download.MkdirAll`. - Mkdirs Mkdirs -} - -// ProgressBarOptions holds the configuration for progress bars if required. -type ProgressBarOptions struct { - // Writer holds where to output the progress bars to. Defaults to `os.Stdout`. - Writer io.Writer - // Width is the maximum width of the progress bar. If output to a narrower terminal then this - // will be ignored. - MaxWidth int -} - -func newBool(b bool) *bool { - return &b -} - -// Mkdirs is a custom type so we can differentiate between not specified (nil) -// and set. -type Mkdirs *bool - -var ( - // MkdirAll is used to create all intermediate directories if required. - MkdirAll = Mkdirs(newBool(true)) - // MkdirNone is used to create no intermediate directories. - MkdirNone = Mkdirs(newBool(false)) -) - -// ToFile downloads the specified `src` URL to `dest` file using -// the specified `FileOptions`. 
-func ToFile(src, dest string, options FileOptions) error { - u, err := url.Parse(src) - if err != nil { - return errors.Wrap(err, "invalid src URL") - } - - targetDir := filepath.Dir(dest) - if err = createDir(targetDir, options.Mkdirs == nil || *options.Mkdirs); err != nil { - return err - } - - targetName := filepath.Base(dest) - f, err := ioutil.TempFile(targetDir, ".tmp-"+targetName) - if err != nil { - return errors.Wrap(err, "failed to create temp file") - } - - err = downloadFile(u, f, options.Options) - if err != nil { - _ = f.Close() // #nosec - _ = os.Remove(f.Name()) // #nosec - return errors.Wrap(err, "failed to download") - } - err = f.Close() - if err != nil { - _ = os.Remove(f.Name()) // #nosec - return errors.Wrap(err, "failed to close temp file") - } - - if err = renameFile(f.Name(), dest); err != nil { - return err - } - - return nil -} - -func renameFile(src, dest string) error { - err := os.Rename(src, dest) - if err != nil { - // Rename failed, try to copy file. - destF, err := os.Create(dest) - if err != nil { - return errors.Wrap(err, "failed to create target file") - } - f, err := os.Open(src) - if err != nil { - _ = os.Remove(dest) - return errors.Wrap(err, "failed to open source file") - } - defer func() { - _ = f.Close() // #nosec - _ = os.Remove(f.Name()) // #nosec - }() - _, err = io.Copy(destF, f) - if err != nil { - _ = destF.Close() - _ = os.Remove(destF.Name()) - return errors.Wrap(err, "failed to rename temp file to destination") - } - err = destF.Close() - if err != nil { - _ = os.Remove(destF.Name()) - return errors.Wrap(err, "failed to rename temp file to destination") - } - } - - return nil -} - -func createDir(dir string, mkdirs bool) error { - if _, err := os.Stat(dir); err != nil { - if !os.IsNotExist(err) { - return errors.Wrap(err, "failed to check destination directory") - } - if !mkdirs { - return errors.Errorf("directory %s does not exist", dir) - } - err = os.MkdirAll(dir, 0700) - if err != nil { - return errors.Wrap(err, "failed to create destination directory") - } - } - - return nil -} - -func downloadFile(u *url.URL, f *os.File, options Options) error { - err := FromURL(u, f, options) - if err != nil { - return errors.Wrap(err, "failed to download to temp file") - } - - return nil -} - -// ToWriter downloads the specified `src` URL to `w` writer using -// the specified `Options`. -func ToWriter(src string, w io.Writer, options Options) error { - u, err := url.Parse(src) - if err != nil { - return errors.Wrap(err, "invalid src URL") - } - return FromURL(u, w, options) -} - -// FromURL downloads the specified `src` URL to `w` writer using -// the specified `Options`. 
-func FromURL(src *url.URL, w io.Writer, options Options) error { - httpClient := getHTTPClient(options) - var ( - err error - resp *http.Response - ) - downloader := func() error { - resp, err = httpClient.Get(src.String()) - if err != nil { - return &retriableError{errors.Wrap(err, "Temporary download error")} - } - if resp.StatusCode != http.StatusOK { - defer func() { _ = resp.Body.Close() }() // #nosec - return errors.Errorf("received invalid status code: %d (expected %d)", resp.StatusCode, http.StatusOK) - } - return nil - } - retries := options.Retries - if retries == 0 { - retries = 5 - } - if err = retryAfter(retries, downloader, options.RetryInterval); err != nil { - return errors.Wrap(err, "download failed") - } - defer func() { _ = resp.Body.Close() }() // #nosec - - var ( - validator checksumValidator - - reader io.Reader = resp.Body - ) - - if options.ProgressBars != nil && resp.ContentLength > 0 { - bar := newProgressBar(resp.ContentLength, options.ProgressBars.MaxWidth, options.ProgressBars.Writer) - bar.Start() - reader = bar.NewProxyReader(reader) - defer func() { - <-time.After(bar.RefreshRate) - fmt.Println() - }() - } - - validator, reader, err = createValidatorReader(reader, options.ChecksumHash, httpClient, options.Checksum, path.Base(src.Path)) - if err != nil { - return err - } - - if _, err = io.Copy(w, reader); err != nil { - return errors.Wrap(err, "failed to copy contents") - } - - if !validator.validate() { - return errors.New("checksum validation failed") - } - - return nil -} - -func createValidatorReader(reader io.Reader, hashType crypto.Hash, httpClient *http.Client, checksum, filename string) (checksumValidator, io.Reader, error) { - validator, err := createValidator(hashType, httpClient, checksum, filename) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create validator") - } - return validator, io.TeeReader(reader, validator), nil -} - -type noopValidator struct { -} - -func (*noopValidator) validate() bool { - return true -} - -func (*noopValidator) Write(p []byte) (n int, err error) { - return ioutil.Discard.Write(p) -} - -var _ checksumValidator = &noopValidator{} - -func createValidator(hashType crypto.Hash, httpClient *http.Client, checksum, filename string) (checksumValidator, error) { - if len(checksum) == 0 { - return &noopValidator{}, nil - } - var hasher hash.Hash - switch hashType { - case crypto.SHA256, 0: - hasher = sha256.New() - case crypto.SHA1: - hasher = sha1.New() - case crypto.SHA512: - hasher = sha512.New() - case crypto.MD5: - hasher = md5.New() // #nosec - default: - return nil, errors.New("invalid hash function") - } - - validator, err := newValidator(hasher, httpClient, checksum, filename) - if err != nil { - return nil, errors.Wrap(err, "failed to create validator") - } - - return validator, nil -} - -func getHTTPClient(options Options) *http.Client { - httpClient := options.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - return httpClient -} - -func getBarWriter(w io.Writer) io.Writer { - if w == nil { - w = os.Stdout - } - return w -} - -func newProgressBar(length int64, maxWidth int, w io.Writer) *pb.ProgressBar { - bar := pb.New64(length).SetUnits(pb.U_BYTES) - if maxWidth > 0 { - bar.SetMaxWidth(maxWidth) - } - barWriter := getBarWriter(w) - bar.Output = barWriter - return bar -} diff --git a/vendor/github.com/jimmidyson/go-download/retries.go b/vendor/github.com/jimmidyson/go-download/retries.go deleted file mode 100644 index 219df04f6cb9..000000000000 --- 
a/vendor/github.com/jimmidyson/go-download/retries.go +++ /dev/null @@ -1,34 +0,0 @@ -package download - -import ( - "time" - - multierror "github.com/hashicorp/go-multierror" -) - -type retriableError struct { - err error -} - -func (e *retriableError) Error() string { - return e.err.Error() -} - -func retryAfter(attempts int, callback func() error, d time.Duration) error { - var res *multierror.Error - if attempts == -1 { - attempts = ^int(0) - } - for i := 0; i < attempts; i++ { - err := callback() - if err == nil { - return nil - } - res = multierror.Append(res, err) - if _, ok := err.(*retriableError); !ok { - return res - } - <-time.After(d) - } - return res.ErrorOrNil() -} diff --git a/vendor/github.com/johanneswuerbach/nfsexports/LICENSE b/vendor/github.com/johanneswuerbach/nfsexports/LICENSE deleted file mode 100644 index 02e03357c0a4..000000000000 --- a/vendor/github.com/johanneswuerbach/nfsexports/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Johannes Würbach - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/johanneswuerbach/nfsexports/nfsexports.go b/vendor/github.com/johanneswuerbach/nfsexports/nfsexports.go deleted file mode 100644 index 68d2e69f50b8..000000000000 --- a/vendor/github.com/johanneswuerbach/nfsexports/nfsexports.go +++ /dev/null @@ -1,124 +0,0 @@ -package nfsexports - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" -) - -const ( - defaultExportsFile = "/etc/exports" -) - -// Add export, if exportsFile is an empty string /etc/exports is used -func Add(exportsFile string, identifier string, export string) ([]byte, error) { - if exportsFile == "" { - exportsFile = defaultExportsFile - } - - exports, err := ioutil.ReadFile(exportsFile) - - if err != nil { - if os.IsNotExist(err) { - exports = []byte{} - } else { - return nil, err - } - } - - if containsExport(exports, identifier) { - return exports, nil - } - - newExports := exports - if len(newExports) > 0 && !bytes.HasSuffix(exports, []byte("\n")) { - newExports = append(newExports, '\n') - } - - newExports = append(newExports, []byte(exportEntry(identifier, export))...) 
- - if err := verifyNewExports(newExports); err != nil { - return nil, err - } - - if err := ioutil.WriteFile(exportsFile, newExports, 0644); err != nil { - return nil, err - } - - return newExports, nil -} - -// Remove export, if exportsFile is an empty string /etc/exports is used -func Remove(exportsFile string, identifier string) ([]byte, error) { - if exportsFile == "" { - exportsFile = defaultExportsFile - } - - exports, err := ioutil.ReadFile(exportsFile) - if err != nil { - return nil, err - } - - beginMark := []byte(fmt.Sprintf("# BEGIN: %s", identifier)) - endMark := []byte(fmt.Sprintf("# END: %s\n", identifier)) - - begin := bytes.Index(exports, beginMark) - end := bytes.Index(exports, endMark) - - if begin == -1 || end == -1 { - return nil, fmt.Errorf("Couldn't not find export %s in %s", identifier, exportsFile) - } - - newExports := append(exports[:begin], exports[end+len(endMark):]...) - newExports = append(bytes.TrimSpace(newExports), '\n') - - if err := ioutil.WriteFile(exportsFile, newExports, 0644); err != nil { - return nil, err - } - - return newExports, nil -} - -// ReloadDaemon reload NFS daemon -func ReloadDaemon() error { - cmd := exec.Command("sudo", "nfsd", "update") - cmd.Stderr = &bytes.Buffer{} - - if err := cmd.Run(); err != nil { - return fmt.Errorf("Reloading nfds failed: %s\n%s", err.Error(), cmd.Stderr) - } - - return nil -} - -func containsExport(exports []byte, identifier string) bool { - return bytes.Contains(exports, []byte(fmt.Sprintf("# BEGIN: %s\n", identifier))) -} - -func exportEntry(identifier string, export string) string { - return fmt.Sprintf("# BEGIN: %s\n%s\n# END: %s\n", identifier, export, identifier) -} - -func verifyNewExports(newExports []byte) error { - tmpFile, err := ioutil.TempFile("", "exports") - if err != nil { - return err - } - defer tmpFile.Close() - - if _, err := tmpFile.Write(newExports); err != nil { - return err - } - tmpFile.Close() - - cmd := exec.Command("nfsd", "-F", tmpFile.Name(), "checkexports") - cmd.Stderr = &bytes.Buffer{} - - if err := cmd.Run(); err != nil { - return fmt.Errorf("Export verification failed:\n%s\n%s", cmd.Stderr, err.Error()) - } - - return nil -} diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5ab28e9..000000000000 --- a/vendor/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go deleted file mode 100644 index e674d0f397ed..000000000000 --- a/vendor/github.com/json-iterator/go/adapter.go +++ /dev/null @@ -1,150 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -// UnmarshalFromString convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { - if !adapter.iter.loadMore() { - return io.EOF - } - } - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? -func (adapter *Decoder) More() bool { - iter := adapter.iter - if iter.Error != nil { - return false - } - c := iter.nextToken() - if c == 0 { - return false - } - iter.unreadByte() - return c != ']' && c != '}' -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// Number instead of as a float64. 
-func (adapter *Decoder) UseNumber() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.UseNumber = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (adapter *Decoder) DisallowUnknownFields() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.DisallowUnknownFields = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.WriteRaw("\n") - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - config := adapter.stream.cfg.configBeforeFrozen - config.IndentionStep = len(indent) - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go deleted file mode 100644 index daecfed615e7..000000000000 --- a/vendor/github.com/json-iterator/go/any.go +++ /dev/null @@ -1,321 +0,0 @@ -package jsoniter - -import ( - "errors" - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "strconv" - "unsafe" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. 
-type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect2.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - if strconv.IntSize == 32 { - return WrapInt32(int32(val.(int))) - } - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - if strconv.IntSize == 32 { - return WrapUint32(uint32(val.(uint))) - } - return WrapUint64(uint64(val.(uint))) - case reflect.Uintptr: - if ptrSize == 32 { - return WrapUint32(uint32(val.(uintptr))) - } - return WrapUint64(uint64(val.(uintptr))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
-func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - case 0: - return &invalidAny{baseAny{}, errors.New("input is empty")} - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} - -var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() - -func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -type anyCodec struct { - valType reflect2.Type -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - panic("not implemented") -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - any.WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - return any.Size() == 0 -} - -type directAnyCodec struct { -} - -func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *(*Any)(ptr) = iter.readAny() -} - -func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - any := *(*Any)(ptr) - any.WriteTo(stream) -} - -func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { - any := *(*Any)(ptr) - return any.Size() == 0 -} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go deleted file mode 100644 index 0449e9aa428a..000000000000 --- a/vendor/github.com/json-iterator/go/any_array.go +++ /dev/null @@ -1,278 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type arrayLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *arrayLazyAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayLazyAny) MustBeValid() Any { - return any -} - -func (any *arrayLazyAny) LastError() error { - return any.err -} - -func (any *arrayLazyAny) ToBool() bool { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.ReadArray() -} - -func (any *arrayLazyAny) ToInt() int { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt32() int32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt64() int64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint() uint { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint32() uint32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint64() uint64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat32() float32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat64() float64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *arrayLazyAny) ToVal(val interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(val) -} - -func (any *arrayLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - 
valueBytes := locateArrayElement(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - arr := make([]Any, 0) - iter.ReadArrayCB(func(iter *Iterator) bool { - found := iter.readAny().Get(path[1:]...) - if found.ValueType() != InvalidValue { - arr = append(arr, found) - } - return true - }) - return wrapArray(arr) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadArrayCB(func(iter *Iterator) bool { - size++ - iter.Skip() - return true - }) - return size -} - -func (any *arrayLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *arrayLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type arrayAny struct { - baseAny - val reflect.Value -} - -func wrapArray(val interface{}) *arrayAny { - return &arrayAny{baseAny{}, reflect.ValueOf(val)} -} - -func (any *arrayAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayAny) MustBeValid() Any { - return any -} - -func (any *arrayAny) LastError() error { - return nil -} - -func (any *arrayAny) ToBool() bool { - return any.val.Len() != 0 -} - -func (any *arrayAny) ToInt() int { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt32() int32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt64() int64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint() uint { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint32() uint32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint64() uint64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat32() float32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat64() float64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToString() string { - str, _ := MarshalToString(any.val.Interface()) - return str -} - -func (any *arrayAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - if firstPath < 0 || firstPath >= any.val.Len() { - return newInvalidAny(path) - } - return Wrap(any.val.Index(firstPath).Interface()) - case int32: - if '*' == firstPath { - mappedAll := make([]Any, 0) - for i := 0; i < any.val.Len(); i++ { - mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll = append(mappedAll, mapped) - } - } - return wrapArray(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayAny) Size() int { - return any.val.Len() -} - -func (any *arrayAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *arrayAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go deleted file mode 100644 index 9452324af5b1..000000000000 --- a/vendor/github.com/json-iterator/go/any_bool.go +++ /dev/null @@ -1,137 +0,0 @@ -package jsoniter - -type trueAny struct { - baseAny -} - -func (any *trueAny) LastError() error { - return nil -} - -func (any *trueAny) ToBool() bool { - return true -} - -func (any *trueAny) ToInt() int { - return 1 -} - -func (any *trueAny) ToInt32() int32 { - return 1 -} - -func (any *trueAny) ToInt64() int64 { - return 1 -} - -func (any *trueAny) ToUint() uint { - return 1 -} - -func (any *trueAny) ToUint32() uint32 { - return 1 -} - -func (any *trueAny) ToUint64() uint64 { - return 1 -} - -func (any *trueAny) ToFloat32() float32 { - return 1 -} - -func (any *trueAny) ToFloat64() float64 { - return 1 -} - -func (any *trueAny) ToString() string { - return "true" -} - -func (any *trueAny) WriteTo(stream *Stream) { - stream.WriteTrue() -} - -func (any *trueAny) Parse() *Iterator { - return nil -} - -func (any *trueAny) GetInterface() interface{} { - return true -} - -func (any *trueAny) ValueType() ValueType { - return BoolValue -} - -func (any *trueAny) MustBeValid() Any { - return any -} - -type falseAny struct { - baseAny -} - -func (any *falseAny) LastError() error { - return nil -} - -func (any *falseAny) ToBool() bool { - return false -} - -func (any *falseAny) ToInt() int { - return 0 -} - -func (any *falseAny) ToInt32() int32 { - return 0 -} - -func (any *falseAny) ToInt64() int64 { - return 0 -} - -func (any *falseAny) ToUint() uint { - return 0 -} - -func (any *falseAny) ToUint32() uint32 { - return 0 -} - -func (any *falseAny) ToUint64() uint64 { - return 0 -} - -func (any *falseAny) ToFloat32() float32 { - return 0 -} - -func (any *falseAny) ToFloat64() float64 { - return 0 -} - -func (any *falseAny) ToString() string { - return "false" -} - -func (any *falseAny) WriteTo(stream *Stream) { - stream.WriteFalse() -} - -func (any *falseAny) Parse() *Iterator { - return nil -} - -func (any *falseAny) GetInterface() interface{} { - return false -} - -func (any *falseAny) ValueType() ValueType { - return BoolValue -} - -func (any *falseAny) MustBeValid() Any { - return any -} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go deleted file mode 100644 index 35fdb09497fa..000000000000 --- a/vendor/github.com/json-iterator/go/any_float.go +++ /dev/null @@ -1,83 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type floatAny struct { - baseAny - val float64 -} - -func (any *floatAny) Parse() *Iterator { - return nil -} - -func (any *floatAny) ValueType() ValueType { - return NumberValue -} - -func (any *floatAny) MustBeValid() Any { - return any -} - -func (any *floatAny) LastError() error { - return nil -} - -func (any *floatAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *floatAny) ToInt() int { - return int(any.val) -} - -func (any *floatAny) ToInt32() int32 { - return int32(any.val) -} - -func (any *floatAny) ToInt64() int64 { - 
return int64(any.val) -} - -func (any *floatAny) ToUint() uint { - if any.val > 0 { - return uint(any.val) - } - return 0 -} - -func (any *floatAny) ToUint32() uint32 { - if any.val > 0 { - return uint32(any.val) - } - return 0 -} - -func (any *floatAny) ToUint64() uint64 { - if any.val > 0 { - return uint64(any.val) - } - return 0 -} - -func (any *floatAny) ToFloat32() float32 { - return float32(any.val) -} - -func (any *floatAny) ToFloat64() float64 { - return any.val -} - -func (any *floatAny) ToString() string { - return strconv.FormatFloat(any.val, 'E', -1, 64) -} - -func (any *floatAny) WriteTo(stream *Stream) { - stream.WriteFloat64(any.val) -} - -func (any *floatAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go deleted file mode 100644 index 1b56f399150d..000000000000 --- a/vendor/github.com/json-iterator/go/any_int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int32Any struct { - baseAny - val int32 -} - -func (any *int32Any) LastError() error { - return nil -} - -func (any *int32Any) ValueType() ValueType { - return NumberValue -} - -func (any *int32Any) MustBeValid() Any { - return any -} - -func (any *int32Any) ToBool() bool { - return any.val != 0 -} - -func (any *int32Any) ToInt() int { - return int(any.val) -} - -func (any *int32Any) ToInt32() int32 { - return any.val -} - -func (any *int32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *int32Any) ToUint() uint { - return uint(any.val) -} - -func (any *int32Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *int32Any) WriteTo(stream *Stream) { - stream.WriteInt32(any.val) -} - -func (any *int32Any) Parse() *Iterator { - return nil -} - -func (any *int32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go deleted file mode 100644 index c440d72b6d3a..000000000000 --- a/vendor/github.com/json-iterator/go/any_int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int64Any struct { - baseAny - val int64 -} - -func (any *int64Any) LastError() error { - return nil -} - -func (any *int64Any) ValueType() ValueType { - return NumberValue -} - -func (any *int64Any) MustBeValid() Any { - return any -} - -func (any *int64Any) ToBool() bool { - return any.val != 0 -} - -func (any *int64Any) ToInt() int { - return int(any.val) -} - -func (any *int64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *int64Any) ToInt64() int64 { - return any.val -} - -func (any *int64Any) ToUint() uint { - return uint(any.val) -} - -func (any *int64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int64Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int64Any) ToString() string { - return strconv.FormatInt(any.val, 10) -} - -func (any *int64Any) WriteTo(stream *Stream) { - stream.WriteInt64(any.val) -} - -func (any 
*int64Any) Parse() *Iterator { - return nil -} - -func (any *int64Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go deleted file mode 100644 index 1d859eac3274..000000000000 --- a/vendor/github.com/json-iterator/go/any_invalid.go +++ /dev/null @@ -1,82 +0,0 @@ -package jsoniter - -import "fmt" - -type invalidAny struct { - baseAny - err error -} - -func newInvalidAny(path []interface{}) *invalidAny { - return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} -} - -func (any *invalidAny) LastError() error { - return any.err -} - -func (any *invalidAny) ValueType() ValueType { - return InvalidValue -} - -func (any *invalidAny) MustBeValid() Any { - panic(any.err) -} - -func (any *invalidAny) ToBool() bool { - return false -} - -func (any *invalidAny) ToInt() int { - return 0 -} - -func (any *invalidAny) ToInt32() int32 { - return 0 -} - -func (any *invalidAny) ToInt64() int64 { - return 0 -} - -func (any *invalidAny) ToUint() uint { - return 0 -} - -func (any *invalidAny) ToUint32() uint32 { - return 0 -} - -func (any *invalidAny) ToUint64() uint64 { - return 0 -} - -func (any *invalidAny) ToFloat32() float32 { - return 0 -} - -func (any *invalidAny) ToFloat64() float64 { - return 0 -} - -func (any *invalidAny) ToString() string { - return "" -} - -func (any *invalidAny) WriteTo(stream *Stream) { -} - -func (any *invalidAny) Get(path ...interface{}) Any { - if any.err == nil { - return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} - } - return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} -} - -func (any *invalidAny) Parse() *Iterator { - return nil -} - -func (any *invalidAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go deleted file mode 100644 index d04cb54c11c1..000000000000 --- a/vendor/github.com/json-iterator/go/any_nil.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsoniter - -type nilAny struct { - baseAny -} - -func (any *nilAny) LastError() error { - return nil -} - -func (any *nilAny) ValueType() ValueType { - return NilValue -} - -func (any *nilAny) MustBeValid() Any { - return any -} - -func (any *nilAny) ToBool() bool { - return false -} - -func (any *nilAny) ToInt() int { - return 0 -} - -func (any *nilAny) ToInt32() int32 { - return 0 -} - -func (any *nilAny) ToInt64() int64 { - return 0 -} - -func (any *nilAny) ToUint() uint { - return 0 -} - -func (any *nilAny) ToUint32() uint32 { - return 0 -} - -func (any *nilAny) ToUint64() uint64 { - return 0 -} - -func (any *nilAny) ToFloat32() float32 { - return 0 -} - -func (any *nilAny) ToFloat64() float64 { - return 0 -} - -func (any *nilAny) ToString() string { - return "" -} - -func (any *nilAny) WriteTo(stream *Stream) { - stream.WriteNil() -} - -func (any *nilAny) Parse() *Iterator { - return nil -} - -func (any *nilAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go deleted file mode 100644 index 9d1e901a66ad..000000000000 --- a/vendor/github.com/json-iterator/go/any_number.go +++ /dev/null @@ -1,123 +0,0 @@ -package jsoniter - -import ( - "io" - "unsafe" -) - -type numberLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any 
*numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go deleted file mode 100644 index c44ef5c989a4..000000000000 --- a/vendor/github.com/json-iterator/go/any_object.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type objectLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *objectLazyAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectLazyAny) MustBeValid() Any { - return any -} - -func (any *objectLazyAny) LastError() error { - return any.err -} - -func (any *objectLazyAny) ToBool() bool { - return true -} - -func (any *objectLazyAny) ToInt() int { - return 0 -} - -func (any *objectLazyAny) ToInt32() int32 { - return 0 -} - -func (any *objectLazyAny) ToInt64() int64 { - return 0 -} - -func (any *objectLazyAny) ToUint() uint { - return 0 -} - -func (any *objectLazyAny) ToUint32() uint32 { - return 0 -} - -func (any *objectLazyAny) ToUint64() uint64 { - return 0 -} - -func (any 
*objectLazyAny) ToFloat32() float32 { - return 0 -} - -func (any *objectLazyAny) ToFloat64() float64 { - return 0 -} - -func (any *objectLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *objectLazyAny) ToVal(obj interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(obj) -} - -func (any *objectLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateObjectField(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - mapped := locatePath(iter, path[1:]) - if mapped.ValueType() != InvalidValue { - mappedAll[field] = mapped - } - return true - }) - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectLazyAny) Keys() []string { - keys := []string{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - iter.Skip() - keys = append(keys, field) - return true - }) - return keys -} - -func (any *objectLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - size++ - return true - }) - return size -} - -func (any *objectLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *objectLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type objectAny struct { - baseAny - err error - val reflect.Value -} - -func wrapStruct(val interface{}) *objectAny { - return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *objectAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectAny) MustBeValid() Any { - return any -} - -func (any *objectAny) Parse() *Iterator { - return nil -} - -func (any *objectAny) LastError() error { - return any.err -} - -func (any *objectAny) ToBool() bool { - return any.val.NumField() != 0 -} - -func (any *objectAny) ToInt() int { - return 0 -} - -func (any *objectAny) ToInt32() int32 { - return 0 -} - -func (any *objectAny) ToInt64() int64 { - return 0 -} - -func (any *objectAny) ToUint() uint { - return 0 -} - -func (any *objectAny) ToUint32() uint32 { - return 0 -} - -func (any *objectAny) ToUint64() uint64 { - return 0 -} - -func (any *objectAny) ToFloat32() float32 { - return 0 -} - -func (any *objectAny) ToFloat64() float64 { - return 0 -} - -func (any *objectAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *objectAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - field := any.val.FieldByName(firstPath) - if !field.IsValid() { - return newInvalidAny(path) - } - return Wrap(field.Interface()) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for i := 0; i < any.val.NumField(); i++ { - field := any.val.Field(i) - if 
field.CanInterface() { - mapped := Wrap(field.Interface()).Get(path[1:]...) - if mapped.ValueType() != InvalidValue { - mappedAll[any.val.Type().Field(i).Name] = mapped - } - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectAny) Keys() []string { - keys := make([]string, 0, any.val.NumField()) - for i := 0; i < any.val.NumField(); i++ { - keys = append(keys, any.val.Type().Field(i).Name) - } - return keys -} - -func (any *objectAny) Size() int { - return any.val.NumField() -} - -func (any *objectAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *objectAny) GetInterface() interface{} { - return any.val.Interface() -} - -type mapAny struct { - baseAny - err error - val reflect.Value -} - -func wrapMap(val interface{}) *mapAny { - return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *mapAny) ValueType() ValueType { - return ObjectValue -} - -func (any *mapAny) MustBeValid() Any { - return any -} - -func (any *mapAny) Parse() *Iterator { - return nil -} - -func (any *mapAny) LastError() error { - return any.err -} - -func (any *mapAny) ToBool() bool { - return true -} - -func (any *mapAny) ToInt() int { - return 0 -} - -func (any *mapAny) ToInt32() int32 { - return 0 -} - -func (any *mapAny) ToInt64() int64 { - return 0 -} - -func (any *mapAny) ToUint() uint { - return 0 -} - -func (any *mapAny) ToUint32() uint32 { - return 0 -} - -func (any *mapAny) ToUint64() uint64 { - return 0 -} - -func (any *mapAny) ToFloat32() float32 { - return 0 -} - -func (any *mapAny) ToFloat64() float64 { - return 0 -} - -func (any *mapAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *mapAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for _, key := range any.val.MapKeys() { - keyAsStr := key.String() - element := Wrap(any.val.MapIndex(key).Interface()) - mapped := element.Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[keyAsStr] = mapped - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - value := any.val.MapIndex(reflect.ValueOf(firstPath)) - if !value.IsValid() { - return newInvalidAny(path) - } - return Wrap(value.Interface()) - } -} - -func (any *mapAny) Keys() []string { - keys := make([]string, 0, any.val.Len()) - for _, key := range any.val.MapKeys() { - keys = append(keys, key.String()) - } - return keys -} - -func (any *mapAny) Size() int { - return any.val.Len() -} - -func (any *mapAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *mapAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go deleted file mode 100644 index a4b93c78c822..000000000000 --- a/vendor/github.com/json-iterator/go/any_str.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any *stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - endPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - endPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go deleted file mode 100644 index 656bbd33d7ee..000000000000 --- a/vendor/github.com/json-iterator/go/any_uint32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint32Any struct { - baseAny - val uint32 -} - -func (any *uint32Any) LastError() error { - return nil -} - -func (any *uint32Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint32Any) MustBeValid() Any { - return any -} - -func (any *uint32Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint32Any) ToInt() int { - return int(any.val) -} - -func (any *uint32Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint32Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint32Any) ToUint32() uint32 { - return any.val -} - -func (any *uint32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *uint32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *uint32Any) WriteTo(stream *Stream) { - stream.WriteUint32(any.val) -} - -func (any *uint32Any) Parse() *Iterator { - return nil -} - -func (any *uint32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go deleted file mode 100644 index 7df2fce33ba9..000000000000 --- a/vendor/github.com/json-iterator/go/any_uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint64Any struct { - baseAny - val uint64 -} - -func (any *uint64Any) LastError() error { - return nil -} - -func (any *uint64Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint64Any) MustBeValid() Any { - return any -} - -func (any *uint64Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint64Any) ToInt() int { - return int(any.val) -} - -func (any *uint64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint64Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint64Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *uint64Any) ToUint64() uint64 { - return any.val -} - -func (any *uint64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint64Any) ToString() string { - return strconv.FormatUint(any.val, 10) -} - -func (any *uint64Any) WriteTo(stream *Stream) { - stream.WriteUint64(any.val) -} - -func (any *uint64Any) Parse() *Iterator { - return nil -} - -func (any *uint64Any) GetInterface() interface{} { - return any.val -} 
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go deleted file mode 100644 index 8c58fcba5922..000000000000 --- a/vendor/github.com/json-iterator/go/config.go +++ /dev/null @@ -1,375 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "reflect" - "sync" - "unsafe" - - "github.com/modern-go/concurrent" - "github.com/modern-go/reflect2" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - DisallowUnknownFields bool - TagKey string - OnlyTaggedField bool - ValidateJsonRawMessage bool - ObjectFieldMustBeSimpleString bool - CaseSensitive bool -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. -type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder - Valid(data []byte) bool - RegisterExtension(extension Extension) - DecoderOf(typ reflect2.Type) ValDecoder - EncoderOf(typ reflect2.Type) ValEncoder -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, // will lose precession - ObjectFieldMustBeSimpleString: true, // do not unescape object field -}.Froze() - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - decoderCache *concurrent.Map - encoderCache *concurrent.Map - encoderExtension Extension - decoderExtension Extension - extraExtensions []Extension - streamPool *sync.Pool - iteratorPool *sync.Pool - caseSensitive bool -} - -func (cfg *frozenConfig) initCache() { - cfg.decoderCache = concurrent.NewMap() - cfg.encoderCache = concurrent.NewMap() -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { - cfg.decoderCache.Store(cacheKey, decoder) -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { - cfg.encoderCache.Store(cacheKey, encoder) -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { - decoder, found := cfg.decoderCache.Load(cacheKey) - if found { - return decoder.(ValDecoder) - } - return nil -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { - encoder, found := cfg.encoderCache.Load(cacheKey) - if found { - return encoder.(ValEncoder) - } - return nil -} - -var cfgCache = concurrent.NewMap() - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - obj, found := cfgCache.Load(cfg) - if found { - return obj.(*frozenConfig) - } - return nil -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCache.Store(cfg, frozenConfig) -} - -// 
Froze forge API from config -func (cfg Config) Froze() API { - api := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, - onlyTaggedField: cfg.OnlyTaggedField, - disallowUnknownFields: cfg.DisallowUnknownFields, - caseSensitive: cfg.CaseSensitive, - } - api.streamPool = &sync.Pool{ - New: func() interface{} { - return NewStream(api, nil, 512) - }, - } - api.iteratorPool = &sync.Pool{ - New: func() interface{} { - return NewIterator(api) - }, - } - api.initCache() - encoderExtension := EncoderExtension{} - decoderExtension := DecoderExtension{} - if cfg.MarshalFloatWith6Digits { - api.marshalFloatWith6Digits(encoderExtension) - } - if cfg.EscapeHTML { - api.escapeHTML(encoderExtension) - } - if cfg.UseNumber { - api.useNumber(decoderExtension) - } - if cfg.ValidateJsonRawMessage { - api.validateJsonRawMessage(encoderExtension) - } - api.encoderExtension = encoderExtension - api.decoderExtension = decoderExtension - api.configBeforeFrozen = cfg - return api -} - -func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { - api := getFrozenConfigFromCache(cfg) - if api != nil { - return api - } - api = cfg.Froze().(*frozenConfig) - for _, extension := range extraExtensions { - api.RegisterExtension(extension) - } - addFrozenConfigToCache(cfg, api) - return api -} - -func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { - encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { - rawMessage := *(*json.RawMessage)(ptr) - iter := cfg.BorrowIterator([]byte(rawMessage)) - iter.Read() - if iter.Error != nil { - stream.WriteRaw("null") - } else { - cfg.ReturnIterator(iter) - stream.WriteRaw(string(rawMessage)) - } - }, func(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 - }} - extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder - extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder -} - -func (cfg *frozenConfig) useNumber(extension DecoderExtension) { - extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - exitingValue := *((*interface{})(ptr)) - if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { - iter.ReadVal(exitingValue) - return - } - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }} -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) RegisterExtension(extension Extension) { - cfg.extraExtensions = append(cfg.extraExtensions, extension) - copied := cfg.configBeforeFrozen - cfg.configBeforeFrozen = copied -} - -type lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for 
better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { - // for better performance - extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} - extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { - encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} - -func (cfg *frozenConfig) Valid(data []byte) bool { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.Skip() - return iter.Error == nil -} diff --git 
a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go deleted file mode 100644 index 95ae54fbfe4d..000000000000 --- a/vendor/github.com/json-iterator/go/iter.go +++ /dev/null @@ -1,322 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. 
-type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - captureStartedAt int - captured []byte - Error error - Attachment interface{} // open for customized decoder -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. 
-func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - peekEnd := iter.head + 10 - if peekEnd > iter.tail { - peekEnd = iter.tail - } - parsing := string(iter.buf[peekStart:peekEnd]) - contextStart := iter.head - 50 - if contextStart < 0 { - contextStart = 0 - } - contextEnd := iter.head + 50 - if contextEnd > iter.tail { - contextEnd = iter.tail - } - context := string(iter.buf[contextStart:contextEnd]) - iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", - operation, msg, iter.head-peekStart, parsing, context) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. -func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go deleted file mode 100644 index 6188cb4577ab..000000000000 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ /dev/null @@ -1,58 +0,0 @@ -package jsoniter - -// ReadArray read array element, tells if the array has more element to read. 
-func (iter *Iterator) ReadArray() (ret bool) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return false // null - case '[': - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - return true - } - return false - case ']': - return false - case ',': - return true - default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) - return - } -} - -// ReadArrayCB read array with callback -func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { - c := iter.nextToken() - if c == '[' { - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - if !callback(iter) { - return false - } - c = iter.nextToken() - for c == ',' { - if !callback(iter) { - return false - } - c = iter.nextToken() - } - if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) - return false - } - return true - } - return true - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) - return false -} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go deleted file mode 100644 index 4f883c0959f7..000000000000 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ /dev/null @@ -1,347 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.ReportError("readFloat32", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - 
case 0: - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - 
case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} - -// ReadNumber read json.Number -func (iter *Iterator) ReadNumber() (ret json.Number) { - return json.Number(iter.readNumberAsString()) -} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go deleted file mode 100644 index 2142320355e8..000000000000 --- a/vendor/github.com/json-iterator/go/iter_int.go +++ /dev/null @@ -1,345 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - if strconv.IntSize == 32 { - return uint(iter.ReadUint32()) - } - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - if strconv.IntSize == 32 { - return int(iter.ReadInt32()) - } - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} - -// ReadUint8 read 
uint8 -func (iter *Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter *Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + 
uint32(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint32(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint64(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint64(ind2)*10 + uint64(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + (value << 1) + 
uint64(ind) - if value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { - iter.ReportError("assertInteger", "can not decode float as int") - } -} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go deleted file mode 100644 index 1c575767130d..000000000000 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ /dev/null @@ -1,251 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strings" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. -func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) - return - case ',': - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -// CaseInsensitive -func (iter *Iterator) readFieldHash() int64 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c != '"' { - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 - } - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if b == '\\' { - iter.head = i - for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } -} - -func calcHash(str string, caseSensitive bool) int64 { - if !caseSensitive { - str = strings.ToLower(str) - } - hash := int64(0x811c9dc5) - for _, b := range []byte(str) { - hash ^= int64(b) - hash *= 0x1000193 - } - return int64(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - var field string - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object 
field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectStart() bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go deleted file mode 100644 index f58beb9137bf..000000000000 --- a/vendor/github.com/json-iterator/go/iter_skip.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsoniter - -import "fmt" - -// ReadNil reads a json object as nil and -// returns whether it's a nil or not -func (iter *Iterator) ReadNil() (ret bool) { - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') // null - return true - } - iter.unreadByte() - return false -} - -// 
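Reference note: ReadObjectCB above walks an object field by field without building a map, and readFieldHash shows why keys are expected to be plain ASCII on the fast path. A usage sketch, not part of the diff; ParseBytes, ConfigDefault and Read are assumed from the package's public API.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// ReadObjectCB is defined in the file deleted above; the constructor and
	// Read are assumed public API.
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, []byte(`{"name":"minikube","replicas":3}`))
	iter.ReadObjectCB(func(it *jsoniter.Iterator, field string) bool {
		fmt.Println(field, "=", it.Read()) // Read decodes whatever value follows the key
		return true                        // returning false stops the walk early
	})
	fmt.Println("err:", iter.Error)
}
```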
ReadBool reads a json object as BoolValue -func (iter *Iterator) ReadBool() (ret bool) { - c := iter.nextToken() - if c == 't' { - iter.skipThreeBytes('r', 'u', 'e') - return true - } - if c == 'f' { - iter.skipFourBytes('a', 'l', 's', 'e') - return false - } - iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) - return -} - -// SkipAndReturnBytes skip next JSON element, and return its content as []byte. -// The []byte can be kept, it is a copy of data. -func (iter *Iterator) SkipAndReturnBytes() []byte { - iter.startCapture(iter.head) - iter.Skip() - return iter.stopCapture() -} - -type captureBuffer struct { - startedAt int - captured []byte -} - -func (iter *Iterator) startCapture(captureStartedAt int) { - if iter.captured != nil { - panic("already in capture mode") - } - iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) -} - -func (iter *Iterator) stopCapture() []byte { - if iter.captured == nil { - panic("not in capture mode") - } - captured := iter.captured - remaining := iter.buf[iter.captureStartedAt:iter.head] - iter.captureStartedAt = -1 - iter.captured = nil - if len(captured) == 0 { - copied := make([]byte, len(remaining)) - copy(copied, remaining) - return copied - } - captured = append(captured, remaining...) - return captured -} - -// Skip skips a json object and positions to relatively the next json object -func (iter *Iterator) Skip() { - c := iter.nextToken() - switch c { - case '"': - iter.skipString() - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - case '0': - iter.unreadByte() - iter.ReadFloat32() - case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.skipNumber() - case '[': - iter.skipArray() - case '{': - iter.skipObject() - default: - iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) - return - } -} - -func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b4 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } -} - -func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } -} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go deleted file mode 100644 index 8fcdc3b69bdf..000000000000 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build jsoniter_sloppy - -package jsoniter - -// sloppy but faster implementation, do not validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { 
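Reference note: SkipAndReturnBytes above combines the capture buffer with Skip to hand back a copy of one raw JSON value, which is useful for deferring part of a document. A sketch under the same assumptions as before (ParseBytes/ConfigDefault assumed public API).

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// ReadObject, Skip and SkipAndReturnBytes are defined in the files deleted
	// above; ParseBytes/ConfigDefault are assumed public API.
	data := []byte(`{"metadata":{"name":"demo"},"spec":{"replicas":2}}`)
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, data)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "spec" {
			raw := iter.SkipAndReturnBytes() // a copy, safe to keep around
			fmt.Printf("raw spec: %s\n", raw)
			continue
		}
		iter.Skip() // positions the iterator just past the value
	}
}
```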
- case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter *Iterator) skipArray() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - case ']': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - case '}': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - } - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. -func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go deleted file mode 100644 index f67bc2e83151..000000000000 --- a/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ /dev/null @@ -1,89 +0,0 @@ -//+build !jsoniter_sloppy - -package jsoniter - -import "fmt" - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - iter.ReadFloat32() - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', 
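Reference note: the sloppy skipArray/skipObject above count bracket depth and treat strings specially so that brackets inside them do not affect nesting. A standalone sketch of that idea with my own helper, not the vendored code and without its buffered-reader handling.

```go
package main

import "fmt"

// skipValue returns the index just past the array or object starting at i,
// counting bracket depth and hopping over strings first so that brackets
// inside them do not disturb the level, similar to skipArray/skipObject above.
func skipValue(buf []byte, i int) int {
	depth := 0
	for i < len(buf) {
		switch buf[i] {
		case '"':
			for i++; i < len(buf) && buf[i] != '"'; i++ {
				if buf[i] == '\\' {
					i++ // jump over the escaped character
				}
			}
		case '{', '[':
			depth++
		case '}', ']':
			depth--
			if depth == 0 {
				return i + 1
			}
		}
		i++
	}
	return i
}

func main() {
	doc := []byte(`[{"a":"]}"},2] tail`)
	fmt.Println(string(doc[skipValue(doc, 0):])) // prints " tail"
}
```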
'5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("trySkipString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return true // already failed - } - } - return false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go deleted file mode 100644 index adc487ea8048..000000000000 --- a/vendor/github.com/json-iterator/go/iter_str.go +++ /dev/null @@ -1,215 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode/utf16" -) - -// ReadString read string from iterator -func (iter *Iterator) ReadString() (ret string) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - ret = string(iter.buf[iter.head:i]) - iter.head = i + 1 - return ret - } else if c == '\\' { - break - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return - } - } - return iter.readStringSlowPath() - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return "" - } - iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readStringSlowPath() (ret string) { - var str []byte - var c byte - for iter.Error == nil { - c = iter.readByte() - if c == '"' { - return string(str) - } - if c == '\\' { - c = iter.readByte() - str = iter.readEscapedChar(c, str) - } else { - str = append(str, c) - } - } - iter.ReportError("readStringSlowPath", "unexpected end of input") - return -} - -func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { - switch c { - case 'u': - r := iter.readU4() - if utf16.IsSurrogate(r) { - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != '\\' { - iter.unreadByte() - str = appendRune(str, r) - return str - } - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != 'u' { - str = appendRune(str, r) - return iter.readEscapedChar(c, str) - } - r2 := iter.readU4() - if iter.Error != nil { - return nil - } - combined := utf16.DecodeRune(r, r2) - if combined == '\uFFFD' { - str = appendRune(str, r) - str = appendRune(str, r2) - } else { - str = appendRune(str, combined) - } - } else { - str = appendRune(str, r) - } - case '"': - str = append(str, '"') - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, 
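Reference note: the strict build above (the default, without the jsoniter_sloppy tag) validates while skipping, so malformed numbers are reported rather than passed over. A small sketch of what that looks like from the outside; ParseBytes/ConfigDefault are assumed public API and the exact error text is not guaranteed.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// "1.2.3" has a second dot, which trySkipNumber above flags while skipping.
	// Building with -tags jsoniter_sloppy selects the non-validating skip.
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, []byte(`[1.2.3]`))
	iter.Skip()
	fmt.Println(iter.Error)
}
```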
'\t') - default: - iter.ReportError("readEscapedChar", - `invalid escape char after \`) - return nil - } - return str -} - -// ReadStringAsSlice read string from iterator without copying into string form. -// The []byte can not be kept, as it will change after next iterator call. -func (iter *Iterator) ReadStringAsSlice() (ret []byte) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - // for: field name, base64, number - if iter.buf[i] == '"' { - // fast path: reuse the underlying buffer - ret = iter.buf[iter.head:i] - iter.head = i + 1 - return ret - } - } - readLen := iter.tail - iter.head - copied := make([]byte, readLen, readLen*2) - copy(copied, iter.buf[iter.head:iter.tail]) - iter.head = iter.tail - for iter.Error == nil { - c := iter.readByte() - if c == '"' { - return copied - } - copied = append(copied, c) - } - return copied - } - iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readU4() (ret rune) { - for i := 0; i < 4; i++ { - c := iter.readByte() - if iter.Error != nil { - return - } - if c >= '0' && c <= '9' { - ret = ret*16 + rune(c-'0') - } else if c >= 'a' && c <= 'f' { - ret = ret*16 + rune(c-'a'+10) - } else if c >= 'A' && c <= 'F' { - ret = ret*16 + rune(c-'A'+10) - } else { - iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) - return - } - } - return ret -} - -const ( - t1 = 0x00 // 0000 0000 - tx = 0x80 // 1000 0000 - t2 = 0xC0 // 1100 0000 - t3 = 0xE0 // 1110 0000 - t4 = 0xF0 // 1111 0000 - t5 = 0xF8 // 1111 1000 - - maskx = 0x3F // 0011 1111 - mask2 = 0x1F // 0001 1111 - mask3 = 0x0F // 0000 1111 - mask4 = 0x07 // 0000 0111 - - rune1Max = 1<<7 - 1 - rune2Max = 1<<11 - 1 - rune3Max = 1<<16 - 1 - - surrogateMin = 0xD800 - surrogateMax = 0xDFFF - - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. - runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" -) - -func appendRune(p []byte, r rune) []byte { - // Negative values are erroneous. Making it unsigned addresses the problem. - switch i := uint32(r); { - case i <= rune1Max: - p = append(p, byte(r)) - return p - case i <= rune2Max: - p = append(p, t2|byte(r>>6)) - p = append(p, tx|byte(r)&maskx) - return p - case i > maxRune, surrogateMin <= i && i <= surrogateMax: - r = runeError - fallthrough - case i <= rune3Max: - p = append(p, t3|byte(r>>12)) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - default: - p = append(p, t4|byte(r>>18)) - p = append(p, tx|byte(r>>12)&maskx) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - } -} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go deleted file mode 100644 index c2934f916eb3..000000000000 --- a/vendor/github.com/json-iterator/go/jsoniter.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package jsoniter implements encoding and decoding of JSON as defined in -// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. -// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter -// and variable type declarations (if any). -// jsoniter interfaces gives 100% compatibility with code using standard lib. 
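Reference note: ReadString above handles escape sequences, including UTF-16 surrogate pairs that readEscapedChar combines and appendRune re-encodes as UTF-8. A usage sketch; the constructor is assumed public API.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// \ud83d\ude00 is a surrogate pair; readEscapedChar combines the two halves
	// and appendRune writes the resulting rune back out as UTF-8.
	// ParseBytes/ConfigDefault are assumed public API.
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, []byte(`"smile: \ud83d\ude00"`))
	fmt.Println(iter.ReadString())
}
```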
-// -// "JSON and Go" -// (https://golang.org/doc/articles/json_and_go.html) -// gives a description of how Marshal/Unmarshal operate -// between arbitrary or predefined json objects and bytes, -// and it applies to jsoniter.Marshal/Unmarshal as well. -// -// Besides, jsoniter.Iterator provides a different set of interfaces -// iterating given bytes/string/reader -// and yielding parsed elements one by one. -// This set of interfaces reads input as required and gives -// better performance. -package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go deleted file mode 100644 index e2389b56cfff..000000000000 --- a/vendor/github.com/json-iterator/go/pool.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - stream := cfg.streamPool.Get().(*Stream) - stream.Reset(writer) - return stream -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.out = nil - stream.Error = nil - stream.Attachment = nil - cfg.streamPool.Put(stream) -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - iter := cfg.iteratorPool.Get().(*Iterator) - iter.ResetBytes(data) - return iter -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - iter.Attachment = nil - cfg.iteratorPool.Put(iter) -} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go deleted file mode 100644 index 4459e203fb85..000000000000 --- a/vendor/github.com/json-iterator/go/reflect.go +++ /dev/null @@ -1,332 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "unsafe" - - "github.com/modern-go/reflect2" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
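Reference note: the package comment above describes jsoniter as a drop-in replacement for encoding/json. A minimal sketch of that switch; ConfigCompatibleWithStandardLibrary is assumed from the package's public API, it is not part of this hunk.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type release struct {
	Name    string `json:"name"`
	Version string `json:"version,omitempty"`
}

func main() {
	// Pick a frozen config and use it like the standard library; the config
	// value is assumed public API.
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	out, err := json.Marshal(release{Name: "demo", Version: "1.0.0"})
	fmt.Println(string(out), err)

	var r release
	err = json.Unmarshal([]byte(`{"name":"demo"}`), &r)
	fmt.Println(r, err)
}
```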
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -type ctx struct { - *frozenConfig - prefix string - encoders map[reflect2.Type]ValEncoder - decoders map[reflect2.Type]ValDecoder -} - -func (b *ctx) caseSensitive() bool { - if b.frozenConfig == nil { - // default is case-insensitive - return false - } - return b.frozenConfig.caseSensitive -} - -func (b *ctx) append(prefix string) *ctx { - return &ctx{ - frozenConfig: b.frozenConfig, - prefix: b.prefix + " " + prefix, - encoders: b.encoders, - decoders: b.decoders, - } -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - cacheKey := reflect2.RTypeOf(obj) - decoder := iter.cfg.getDecoderFromCache(cacheKey) - if decoder == nil { - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - iter.ReportError("ReadVal", "can only unmarshal into pointer") - return - } - decoder = iter.cfg.DecoderOf(typ) - } - ptr := reflect2.PtrOf(obj) - if ptr == nil { - iter.ReportError("ReadVal", "can not read into nil pointer") - return - } - decoder.Decode(ptr, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - cacheKey := reflect2.RTypeOf(val) - encoder := stream.cfg.getEncoderFromCache(cacheKey) - if encoder == nil { - typ := reflect2.TypeOf(val) - encoder = stream.cfg.EncoderOf(typ) - } - encoder.Encode(reflect2.PtrOf(val), stream) -} - -func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { - cacheKey := typ.RType() - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - ptrType := typ.(*reflect2.UnsafePtrType) - decoder = decoderOfType(ctx, ptrType.Elem()) - cfg.addDecoderToCache(cacheKey, decoder) - return decoder -} - -func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfType(ctx, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - return decoder -} - -func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoders[typ] - if decoder != nil { - return decoder - } - placeholder := &placeholderDecoder{} - ctx.decoders[typ] = placeholder - decoder = _createDecoderOfType(ctx, typ) - placeholder.decoder = decoder - return decoder -} - -func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := createDecoderOfJsonRawMessage(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfJsonNumber(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfMarshaler(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfAny(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfNative(ctx, typ) - if decoder != nil { - return decoder - } - switch typ.Kind() { - case reflect.Interface: - ifaceType, isIFace := 
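Reference note: ReadVal and WriteVal above are the reflection entry points; the codec for a type is built once by DecoderOf/EncoderOf and cached by rtype. A sketch that also uses the BorrowStream/ReturnStream pool from the file removed earlier; ParseBytes and ConfigDefault are assumed public API.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type pod struct {
	Name   string            `json:"name"`
	Labels map[string]string `json:"labels"`
}

func main() {
	// ReadVal/WriteVal come from the reflect.go deleted above; the constructor
	// and ConfigDefault are assumed public API.
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, []byte(`{"name":"web","labels":{"app":"web"}}`))
	var p pod
	iter.ReadVal(&p) // must receive a pointer, otherwise ReadVal reports an error
	fmt.Printf("%+v %v\n", p, iter.Error)

	stream := jsoniter.ConfigDefault.BorrowStream(nil) // pool.go, removed above
	defer jsoniter.ConfigDefault.ReturnStream(stream)
	stream.WriteVal(p)
	fmt.Println(string(stream.Buffer()))
}
```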
typ.(*reflect2.UnsafeIFaceType) - if isIFace { - return &ifaceDecoder{valType: ifaceType} - } - return &efaceDecoder{} - case reflect.Struct: - return decoderOfStruct(ctx, typ) - case reflect.Array: - return decoderOfArray(ctx, typ) - case reflect.Slice: - return decoderOfSlice(ctx, typ) - case reflect.Map: - return decoderOfMap(ctx, typ) - case reflect.Ptr: - return decoderOfOptional(ctx, typ) - default: - return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { - cacheKey := typ.RType() - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - encoder = encoderOfType(ctx, typ) - if typ.LikePtr() { - encoder = &onePtrEncoder{encoder} - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder -} - -type onePtrEncoder struct { - encoder ValEncoder -} - -func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfType(ctx, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - return encoder -} - -func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoders[typ] - if encoder != nil { - return encoder - } - placeholder := &placeholderEncoder{} - ctx.encoders[typ] = placeholder - encoder = _createEncoderOfType(ctx, typ) - placeholder.encoder = encoder - return encoder -} -func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := createEncoderOfJsonRawMessage(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfJsonNumber(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfMarshaler(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfAny(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return encoderOfStruct(ctx, typ) - case reflect.Array: - return encoderOfArray(ctx, typ) - case reflect.Slice: - return encoderOfSlice(ctx, typ) - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return encoderOfOptional(ctx, typ) - default: - return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -type lazyErrorDecoder struct { - err error -} - -func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() != NilValue { - if iter.Error == nil { - iter.Error = decoder.err - } - } else { - iter.Skip() - } -} - -type lazyErrorEncoder struct { - err error -} - -func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if ptr == nil { - stream.WriteNil() - } else 
if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type placeholderDecoder struct { - decoder ValDecoder -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(ptr, iter) -} - -type placeholderEncoder struct { - encoder ValEncoder -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(ptr) -} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go deleted file mode 100644 index 13a0b7b0878c..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_array.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayDecoder{arrayType, decoder} -} - -func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - if arrayType.Len() == 0 { - return emptyArrayEncoder{} - } - encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayEncoder{arrayType, encoder} -} - -type emptyArrayEncoder struct{} - -func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return true -} - -type arrayEncoder struct { - arrayType *reflect2.UnsafeArrayType - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType *reflect2.UnsafeArrayType - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - arrayType := decoder.arrayType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return - } - if c != '[' { - iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - return - } - iter.unreadByte() - elemPtr := arrayType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - if length >= arrayType.Len() { - iter.Skip() - continue - } - idx := length - length += 1 - elemPtr = arrayType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - 
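Reference note: the placeholderDecoder/placeholderEncoder above are what let codec construction terminate for self-referential types, since createDecoderOfType parks a placeholder in the per-build cache before recursing. A sketch; the package-level Marshal/Unmarshal helpers are assumed public API.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// node refers to itself; building its codec terminates because a placeholder
// is registered for the type before its element codec is built (see above).
type node struct {
	Value    int     `json:"value"`
	Children []*node `json:"children,omitempty"`
}

func main() {
	// Marshal/Unmarshal on the default config are assumed public API.
	out, _ := jsoniter.Marshal(&node{Value: 1, Children: []*node{{Value: 2}}})
	fmt.Println(string(out))

	var n node
	_ = jsoniter.Unmarshal(out, &n)
	fmt.Println(n.Value, len(n.Children))
}
```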
iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go deleted file mode 100644 index 8b6bc8b43328..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_dynamic.go +++ /dev/null @@ -1,70 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -type dynamicEncoder struct { - valType reflect2.Type -} - -func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - stream.WriteVal(obj) -} - -func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.valType.UnsafeIndirect(ptr) == nil -} - -type efaceDecoder struct { -} - -func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - pObj := (*interface{})(ptr) - obj := *pObj - if obj == nil { - *pObj = iter.Read() - return - } - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - *pObj = iter.Read() - return - } - ptrType := typ.(*reflect2.UnsafePtrType) - ptrElemType := ptrType.Elem() - if iter.WhatIsNext() == NilValue { - if ptrElemType.Kind() != reflect.Ptr { - iter.skipFourBytes('n', 'u', 'l', 'l') - *pObj = nil - return - } - } - if reflect2.IsNil(obj) { - obj := ptrElemType.New() - iter.ReadVal(obj) - *pObj = obj - return - } - iter.ReadVal(obj) -} - -type ifaceDecoder struct { - valType *reflect2.UnsafeIFaceType -} - -func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) - return - } - obj := decoder.valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - iter.ReportError("decode non empty interface", "can not unmarshal into nil") - return - } - iter.ReadVal(obj) -} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go deleted file mode 100644 index 04f68756bf35..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ /dev/null @@ -1,483 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - Type reflect2.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name() == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field reflect2.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateMapKeyDecoder(typ reflect2.Type) ValDecoder - CreateMapKeyEncoder(typ reflect2.Type) ValEncoder - CreateDecoder(typ reflect2.Type) ValDecoder - CreateEncoder(typ reflect2.Type) ValEncoder - DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type EncoderExtension map[reflect2.Type]ValEncoder - -// UpdateStructDescriptor No-op -func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder get encoder from map -func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return extension[typ] -} - -// CreateMapKeyDecoder No-op -func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type DecoderExtension map[reflect2.Type]ValDecoder - -// UpdateStructDescriptor No-op -func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder get decoder from map -func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return extension[typ] -} - -// CreateEncoder No-op -func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder 
-} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - decoder := ctx.decoderExtension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder = typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - ptrType := 
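Reference note: the Extension SPI above (RegisterExtension, DummyExtension, UpdateStructDescriptor) is how callers customise or rename struct fields. A sketch of a small extension; the struct and field names are my own, and the package-level Marshal helper is assumed public API.

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// upperCaseExtension embeds DummyExtension (deleted above) so it only needs to
// implement the one hook it cares about: renaming every field to upper case by
// rewriting the descriptor's FromNames/ToNames.
type upperCaseExtension struct {
	jsoniter.DummyExtension
}

func (e *upperCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
	for _, binding := range sd.Fields {
		name := strings.ToUpper(binding.Field.Name())
		binding.FromNames = []string{name}
		binding.ToNames = []string{name}
	}
}

type config struct {
	Driver string
	CPUs   int
}

func main() {
	// RegisterExtension installs the extension globally; Marshal is assumed to
	// be the package-level helper on the default config.
	jsoniter.RegisterExtension(&upperCaseExtension{})
	out, _ := jsoniter.Marshal(config{Driver: "kvm2", CPUs: 2})
	fmt.Println(string(out)) // e.g. {"DRIVER":"kvm2","CPUS":2}
}
```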
typ.(*reflect2.UnsafePtrType) - decoder := typeDecoders[ptrType.Elem().String()] - if decoder != nil { - return &OptionalDecoder{ptrType.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - encoder := ctx.encoderExtension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder = typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - typePtr := typ.(*reflect2.UnsafePtrType) - encoder := typeEncoders[typePtr.Elem().String()] - if encoder != nil { - return &OptionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { - structType := typ.(*reflect2.UnsafeStructType) - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < structType.NumField(); i++ { - field := structType.Field(i) - tag, hastag := field.Tag().Lookup(ctx.getTagKey()) - if ctx.onlyTaggedField && !hastag { - continue - } - tagParts := strings.Split(tag, ",") - if tag == "-" { - continue - } - if field.Anonymous() && (tag == "" || tagParts[0] == "") { - if field.Type().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, field.Type()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type().Kind() == reflect.Ptr { - ptrType := field.Type().(*reflect2.UnsafePtrType) - if ptrType.Elem().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, ptrType.Elem()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &dereferenceEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - } - fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - decoder = decoderOfType(ctx.append(field.Name()), field.Type()) - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - encoder = encoderOfType(ctx.append(field.Name()), field.Type()) - } - binding := &Binding{ - Field: field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(ctx, typ, bindings, embeddedBindings) -} -func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - structDescriptor := &StructDescriptor{ - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) - ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) - for _, extension := range ctx.extraExtensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, ctx.frozenConfig) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == "string" { - if binding.Field.Type().Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? 
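Reference note: processTags and calcFieldNames above implement the familiar encoding/json tag conventions ("-", "omitempty", ",string"). A sketch showing them in use; Marshal is assumed to be the package-level helper.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Tag handling mirrors encoding/json, per processTags/calcFieldNames above:
// "-" drops a field, "omitempty" elides zero values, ",string" quotes a number.
type status struct {
	Name     string `json:"name"`
	Internal string `json:"-"`
	Note     string `json:"note,omitempty"`
	Port     int    `json:"port,string"`
}

func main() {
	// Marshal is assumed public API.
	out, _ := jsoniter.Marshal(status{Name: "apiserver", Internal: "hidden", Port: 8443})
	fmt.Println(string(out)) // e.g. {"name":"apiserver","port":"8443"}
}
```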
- var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go deleted file mode 100644 index 98d45c1ec255..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_json_number.go +++ /dev/null @@ -1,112 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "strconv" - "unsafe" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} - -var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() -var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() - -func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*json.Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go deleted file mode 100644 
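Reference note: the number codecs above map json.Number (and the package's own Number) to the literal digits, so large integers avoid float64 rounding. A sketch; ConfigCompatibleWithStandardLibrary is assumed public API.

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// json.Number fields go through jsonNumberCodec above and keep the exact
	// digits; the frozen config is assumed public API.
	var v map[string]json.Number
	_ = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal([]byte(`{"id":9007199254740993}`), &v)
	fmt.Println(v["id"].String()) // exact digits, no float64 rounding
	i, _ := v["id"].Int64()
	fmt.Println(i)
}
```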
index f2619936c88f..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() -var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() - -func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go deleted file mode 100644 index 7f66a88b043d..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ /dev/null @@ -1,326 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "sort" - "unsafe" -) - -func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { - mapType := typ.(*reflect2.UnsafeMapType) - keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) - elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) - return &mapDecoder{ - mapType: mapType, - keyType: mapType.Key(), - elemType: mapType.Elem(), - keyDecoder: keyDecoder, - elemDecoder: elemDecoder, - } -} - -func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { - mapType := typ.(*reflect2.UnsafeMapType) - if ctx.sortMapKeys { - return &sortKeysMapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } - } - return &mapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } -} - -func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - } - switch typ.Kind() { - case reflect.String: - return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, 
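Reference note: jsonRawMessageCodec above is just SkipAndReturnBytes on decode and WriteRaw on encode, so a RawMessage field is kept verbatim for later decoding. A sketch; the package-level Unmarshal helper is assumed public API.

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type event struct {
	Kind    string          `json:"kind"`
	Payload json.RawMessage `json:"payload"`
}

func main() {
	// Payload is captured raw by jsonRawMessageCodec above; Unmarshal is
	// assumed public API.
	var e event
	_ = jsoniter.Unmarshal([]byte(`{"kind":"scale","payload":{"replicas":5}}`), &e)
	fmt.Println(e.Kind, string(e.Payload)) // scale {"replicas":5}
}
```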
reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyDecoder{decoderOfType(ctx, typ)} - default: - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(textMarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textMarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } - return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - } - switch typ.Kind() { - case reflect.String: - return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyEncoder{encoderOfType(ctx, typ)} - default: - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Kind() == reflect.Interface { - return &dynamicMapKeyEncoder{ctx, typ} - } - return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -type mapDecoder struct { - mapType *reflect2.UnsafeMapType - keyType reflect2.Type - elemType reflect2.Type - keyDecoder ValDecoder - elemDecoder ValDecoder -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - mapType := decoder.mapType - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - *(*unsafe.Pointer)(ptr) = nil - mapType.UnsafeSet(ptr, mapType.UnsafeNew()) - return - } - if mapType.UnsafeIsNil(ptr) { - mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) - } - if c != '{' { - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return - } - c = iter.nextToken() - if c == '}' { - return - } - if c != '"' { - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return - } - iter.unreadByte() - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - } - if c != '}' { - iter.ReportError("ReadMapCB", `expect }, but found 
`+string([]byte{c})) - } -} - -type numericMapKeyDecoder struct { - decoder ValDecoder -} - -func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } - decoder.decoder.Decode(ptr, iter) - c = iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } -} - -type numericMapKeyEncoder struct { - encoder ValEncoder -} - -func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.encoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type dynamicMapKeyEncoder struct { - ctx *ctx - valType reflect2.Type -} - -func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) -} - -func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - obj := encoder.valType.UnsafeIndirect(ptr) - return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) -} - -type mapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - iter := encoder.mapType.UnsafeIterate(ptr) - for i := 0; iter.HasNext(); i++ { - if i != 0 { - stream.WriteMore() - } - key, elem := iter.UnsafeNext() - encoder.keyEncoder.Encode(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, stream) - } - stream.WriteObjectEnd() -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type sortKeysMapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - mapIter := encoder.mapType.UnsafeIterate(ptr) - subStream := stream.cfg.BorrowStream(nil) - subIter := stream.cfg.BorrowIterator(nil) - keyValues := encodedKeyValues{} - for mapIter.HasNext() { - subStream.buf = make([]byte, 0, 64) - key, elem := mapIter.UnsafeNext() - encoder.keyEncoder.Encode(key, subStream) - if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { - stream.Error = subStream.Error - } - encodedKey := subStream.Buffer() - subIter.ResetBytes(encodedKey) - decodedKey := subIter.ReadString() - if stream.indention > 0 { - subStream.writeTwoBytes(byte(':'), byte(' ')) - } else { - subStream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, subStream) - keyValues = append(keyValues, encodedKV{ - key: decodedKey, - keyValue: subStream.Buffer(), - }) - } - sort.Sort(keyValues) - for i, keyValue := range keyValues { - if i != 0 { - stream.WriteMore() - } - stream.Write(keyValue.keyValue) - } - stream.WriteObjectEnd() - stream.cfg.ReturnStream(subStream) - stream.cfg.ReturnIterator(subIter) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type 
encodedKeyValues []encodedKV - -type encodedKV struct { - key string - keyValue []byte -} - -func (sv encodedKeyValues) Len() int { return len(sv) } -func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go deleted file mode 100644 index 58ac959ad8bf..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ /dev/null @@ -1,218 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() -var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() -var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() - -func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ptrType}, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ptrType}, - } - } - return nil -} - -func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == marshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - } - return encoder - } - if typ.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &marshalerEncoder{ - valType: typ, - checkIsEmpty: checkIsEmpty, - } - return encoder - } - ptrType := reflect2.PtrTo(typ) - if ctx.prefix != "" && ptrType.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &marshalerEncoder{ - valType: ptrType, - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - if typ == textMarshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directTextMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - return encoder - } - if typ.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return encoder - } - // if prefix is empty, the type is the root type - if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: ptrType, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - return nil -} - -type marshalerEncoder struct { - checkIsEmpty checkIsEmpty - valType reflect2.Type -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := obj.(json.Marshaler) - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return 
encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directMarshalerEncoder struct { - checkIsEmpty checkIsEmpty -} - -func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*json.Marshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - valType reflect2.Type - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := (obj).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directTextMarshalerEncoder struct { - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*encoding.TextMarshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - unmarshaler := obj.(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - ptrType := valType.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elem := elemType.UnsafeNew() - ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) - obj = valType.UnsafeIndirect(ptr) - } - unmarshaler := (obj).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go deleted file mode 100644 index 9042eb0cb989..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_native.go +++ /dev/null @@ -1,451 +0,0 @@ -package jsoniter - -import ( - "encoding/base64" - "reflect" - "strconv" - "unsafe" - - "github.com/modern-go/reflect2" -) - -const ptrSize = 32 << uintptr(^uintptr(0)>>63) - -func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - 
sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - 
return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) IsEmpty(ptr 
unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type base64Codec struct { - sliceType *reflect2.UnsafeSliceType - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - codec.sliceType.UnsafeSetNil(ptr) - return - } - switch iter.WhatIsNext() { - case StringValue: - src := iter.ReadString() - dst, err := base64.StdEncoding.DecodeString(src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - 
stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go deleted file mode 100644 index 43ec71d6dadf..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ /dev/null @@ -1,133 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - decoder := decoderOfType(ctx, elemType) - if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { - return &dereferenceDecoder{elemType, decoder} - } - return &OptionalDecoder{elemType, decoder} -} - -func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elemEncoder := encoderOfType(ctx, elemType) - encoder := &OptionalEncoder{elemEncoder} - return encoder -} - -type OptionalDecoder struct { - ValueType reflect2.Type - ValueDecoder ValDecoder -} - -func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.ValueType.UnsafeNew() - decoder.ValueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type dereferenceDecoder struct { - // only to deference a pointer - valueType reflect2.Type - valueDecoder ValDecoder -} - -func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.valueType.UnsafeNew() - decoder.valueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type OptionalEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*unsafe.Pointer)(ptr)) == nil -} - -type dereferenceEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - dePtr := *((*unsafe.Pointer)(ptr)) - if dePtr == nil { - return true - } - return encoder.ValueEncoder.IsEmpty(dePtr) -} - -func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - deReferenced := *((*unsafe.Pointer)(ptr)) - if deReferenced == nil { - return true - } - isEmbeddedPtrNil, converted := 
encoder.ValueEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := unsafe.Pointer(deReferenced) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type referenceEncoder struct { - encoder ValEncoder -} - -func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -type referenceDecoder struct { - decoder ValDecoder -} - -func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) -} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go deleted file mode 100644 index 9441d79df33b..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_slice.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceDecoder{sliceType, decoder} -} - -func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceEncoder{sliceType, encoder} -} - -type sliceEncoder struct { - sliceType *reflect2.UnsafeSliceType - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if encoder.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - length := encoder.sliceType.UnsafeLengthOf(ptr) - if length == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) - for i := 1; i < length; i++ { - stream.WriteMore() - elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.sliceType.UnsafeLengthOf(ptr) == 0 -} - -type sliceDecoder struct { - sliceType *reflect2.UnsafeSliceType - elemDecoder ValDecoder -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - sliceType := decoder.sliceType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - sliceType.UnsafeSetNil(ptr) - return - } - if c != '[' { - iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) - return - } - iter.unreadByte() - sliceType.UnsafeGrow(ptr, 1) - elemPtr := sliceType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - idx := length - length += 1 - sliceType.UnsafeGrow(ptr, length) - elemPtr = sliceType.UnsafeGetIndex(ptr, idx) - 
decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go deleted file mode 100644 index 355d2d116b4c..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ /dev/null @@ -1,1048 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "strings" - "unsafe" - - "github.com/modern-go/reflect2" -) - -func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { - bindings := map[string]*Binding{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[k] = binding.Decoder.(*structFieldDecoder) - } - - if !ctx.caseSensitive() { - for k, binding := range bindings { - if _, found := fields[strings.ToLower(k)]; !found { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - } - } - - return createStructDecoder(ctx, typ, fields) -} - -func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { - if ctx.disallowUnknownFields { - return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} - } - knownHash := map[int64]struct{}{ - 0: {}, - } - - switch len(fields) { - case 0: - return &skipObjectDecoder{typ} - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} - } - case 2: - var fieldHash1 int64 - var fieldHash2 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} - case 3: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - 
fieldName3, fieldDecoder3} - case 4: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4} - case 5: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5} - case 6: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6} - case 7: - var fieldName1 int64 - 
var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7} - case 8: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8} - case 9: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 
*structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9} - case 10: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldName10 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9, - 
fieldName10, fieldDecoder10} - } - return &generalStructDecoder{typ, fields, false} -} - -type generalStructDecoder struct { - typ reflect2.Type - fields map[string]*structFieldDecoder - disallowUnknownFields bool -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - var c byte - for c = ','; c == ','; c = iter.nextToken() { - decoder.decodeOneField(ptr, iter) - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - if c != '}' { - iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) - } -} - -func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { - var field string - var fieldDecoder *structFieldDecoder - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes := iter.ReadStringAsSlice() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } else { - field = iter.ReadString() - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } - if fieldDecoder == nil { - msg := "found unknown field: " + field - if decoder.disallowUnknownFields { - iter.ReportError("ReadObject", msg) - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - iter.Skip() - return - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - fieldDecoder.Decode(ptr, iter) -} - -type skipObjectDecoder struct { - typ reflect2.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect2.Type - fieldHash int64 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type twoFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type threeFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 
*structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type fourFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type fiveFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type sixFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) 
- default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type sevenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type eightFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type nineFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter 
*Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type tenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder - fieldHash10 int64 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type structFieldDecoder struct { - field reflect2.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := decoder.field.UnsafeGet(ptr) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := 
iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go deleted file mode 100644 index d0759cf6418c..000000000000 --- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "unsafe" -) - -func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{} - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{typ, finalOrderedFields} -} - -func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { - encoder := createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return &structEncoder{typ: typ} - case reflect.Array: - return &arrayEncoder{} - case reflect.Slice: - return &sliceEncoder{} - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return &OptionalEncoder{} - default: - return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} - } -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } -} - -type structFieldEncoder struct { - field reflect2.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := encoder.field.UnsafeGet(ptr) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - 
fieldPtr := encoder.field.UnsafeGet(ptr) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := encoder.field.UnsafeGet(ptr) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type IsEmbeddedPtrNil interface { - IsEmbeddedPtrNil(ptr unsafe.Pointer) bool -} - -type structEncoder struct { - typ reflect2.Type - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if field.encoder.IsEmbeddedPtrNil(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go deleted file mode 100644 index 17662fdedcb5..000000000000 --- a/vendor/github.com/json-iterator/go/stream.go +++ /dev/null @@ -1,211 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. -type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - Error error - indention int - Attachment interface{} // open for customized encoder -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. 
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, 0, bufSize), - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.buf = stream.buf[:0] -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return cap(stream.buf) - len(stream.buf) -} - -// Buffered returns the number of bytes that have been written into the current buffer. -func (stream *Stream) Buffered() int { - return len(stream.buf) -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf -} - -// SetBuffer allows to append to the internal buffer directly -func (stream *Stream) SetBuffer(buf []byte) { - stream.buf = buf -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - stream.buf = append(stream.buf, p...) - if stream.out != nil { - nn, err = stream.out.Write(stream.buf) - stream.buf = stream.buf[nn:] - return - } - return len(p), nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - stream.buf = append(stream.buf, c) -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - stream.buf = append(stream.buf, c1, c2) -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - stream.buf = append(stream.buf, c1, c2, c3) -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4) -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4, c5) -} - -// Flush writes any buffered data to the underlying io.Writer. -func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - n, err := stream.out.Write(stream.buf) - if err != nil { - if stream.Error == nil { - stream.Error = err - } - return err - } - stream.buf = stream.buf[n:] - return nil -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.buf = append(stream.buf, s...) 
-} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) - stream.Flush() -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeTwoBytes('[', ']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - for i := 0; i < toWrite; i++ { - stream.buf = append(stream.buf, ' ') - } -} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go deleted file mode 100644 index f318d2c59da3..000000000000 --- a/vendor/github.com/json-iterator/go/stream_float.go +++ /dev/null @@ -1,94 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
- if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go deleted file mode 100644 index d1059ee4c20e..000000000000 --- a/vendor/github.com/json-iterator/go/stream_int.go +++ /dev/null @@ -1,190 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(space []byte, v uint32) []byte { - start := v >> 24 - if start == 0 { - space = append(space, byte(v>>16), byte(v>>8)) - } else if start == 1 { - space = append(space, byte(v>>8)) - } - space = append(space, byte(v)) - return space -} - -func writeBuf(buf []byte, v uint32) []byte { - return append(buf, byte(v>>16), byte(v>>8), byte(v)) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint8(nval) - } - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = 
writeBuf(stream.buf, digits[r1]) - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint16(nval) - } - stream.WriteUint16(val) -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - } else { - r3 := q2 - q3*1000 - stream.buf = append(stream.buf, byte(q3+'0')) - stream.buf = writeBuf(stream.buf, digits[r3]) - } - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint32(nval) - } - stream.WriteUint32(val) -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q3]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q4]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q5]) - } else { - stream.buf = writeFirstBuf(stream.buf, digits[q6]) - r6 := q5 - q6*1000 - stream.buf = writeBuf(stream.buf, digits[r6]) - } - stream.buf = writeBuf(stream.buf, digits[r5]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint64(nval) - } - stream.WriteUint64(val) -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go deleted file mode 100644 index 54c2ba0b3a2d..000000000000 --- 
a/vendor/github.com/json-iterator/go/stream_str.go +++ /dev/null @@ -1,372 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML