diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0061f065240..7a7566a7ab4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @r2d4 @dgageot @balopat @nkubala +* @r2d4 @dgageot @balopat @nkubala @priyawadhwa diff --git a/.gitignore b/.gitignore index d4db4cb6e6b..b259f2fae0f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ out/ examples/bazel/bazel-* integration/examples/bazel/bazel-* +integration/examples/test-plugin/local/bazel/bazel-* *.new .idea/ docs/.firebase @@ -8,4 +9,5 @@ docs/firebase-debug* docs/public docs/resources docs/node_modules -docs/package-lock.json \ No newline at end of file +docs/package-lock.json +pkg/skaffold/color/debug.test \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index b4aa0da711a..64bbcd2fb75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,237 @@ +# v0.23.0 Release - 2/14/2019 + +*Note*: This release comes with a config change, use `skaffold fix` to permanently upgrade your config to `v1beta5`, however old versions are now auto-upgraded. +See [deprecation-policy.md](/deprecation-policy.md) for details on what beta means. + +*Deprecation notice*: With this release we mark for deprecation the following env variables in the `envTemplate` tagger: +- `DIGEST` +- `DIGEST_ALGO` +- `DIGEST_HEX` +Currently these variables resolve to `_DEPRECATED__`, and the new tagging mechanism adds a digest to the image name thus it shouldn't break existing configurations. +This backward compatibility behavior will be removed earliest 05/14/2019. 
+ +New features: +* Builder plugin for docker in GCB [#1577](https://github.com/GoogleContainerTools/skaffold/pull/1577) +* Add custom build arguments in jib artifacts [#1609](https://github.com/GoogleContainerTools/skaffold/pull/1609) +* Generate json schema [#1644](https://github.com/GoogleContainerTools/skaffold/pull/1644) +* Add --color option [#1618](https://github.com/GoogleContainerTools/skaffold/pull/1618) +* v1beta5 [#1610](https://github.com/GoogleContainerTools/skaffold/pull/1610) +* Experimental UI mode for `skaffold dev` [#1533](https://github.com/GoogleContainerTools/skaffold/pull/1533) +* Upgrade to Kaniko v0.8.0 [#1603](https://github.com/GoogleContainerTools/skaffold/pull/1603) +* New tagging mechanism [#1482](https://github.com/GoogleContainerTools/skaffold/pull/1482) +* Add --build-image option to build command [#1591](https://github.com/GoogleContainerTools/skaffold/pull/1591) +* Allow user to specify custom kaniko image [#1588](https://github.com/GoogleContainerTools/skaffold/pull/1588) +* Better profiles [#1541](https://github.com/GoogleContainerTools/skaffold/pull/1541) + +Fixes: +* Don't push all tags when sha256 builds just one [#1634](https://github.com/GoogleContainerTools/skaffold/pull/1634) +* Handle env commands with multiple variable definitions (#1625) [#1626](https://github.com/GoogleContainerTools/skaffold/pull/1626) +* Rollback Docker dependencies filtering based on target [#1620](https://github.com/GoogleContainerTools/skaffold/pull/1620) +* Fix sub directory support with Kaniko and GCB [#1613](https://github.com/GoogleContainerTools/skaffold/pull/1613) +* Fix regression from port forwarding [#1616](https://github.com/GoogleContainerTools/skaffold/pull/1616) +* Check for new skaffold version when skaffold.yaml parsing fails [#1587](https://github.com/GoogleContainerTools/skaffold/pull/1587) +* Propagate --skip-tests to builders [#1598](https://github.com/GoogleContainerTools/skaffold/pull/1598) +* fix docs build 
[#1607](https://github.com/GoogleContainerTools/skaffold/pull/1607) +* Ignore cache-from pull errors [#1604](https://github.com/GoogleContainerTools/skaffold/pull/1604) +* `[kubectl]` apply labels by patching yaml [#1489](https://github.com/GoogleContainerTools/skaffold/pull/1489) + +Updates & refactorings: +* Optimise sync [#1641](https://github.com/GoogleContainerTools/skaffold/pull/1641) +* kubectl deployer: warn when pattern matches no file [#1647](https://github.com/GoogleContainerTools/skaffold/pull/1647) +* Add integration tests for taggers [#1635](https://github.com/GoogleContainerTools/skaffold/pull/1635) +* Adding a few tests for `skaffold build` [#1628](https://github.com/GoogleContainerTools/skaffold/pull/1628) +* adding scripts for preparing new config version [#1584](https://github.com/GoogleContainerTools/skaffold/pull/1584) +* Remove Tagger from Builder interface [#1601](https://github.com/GoogleContainerTools/skaffold/pull/1601) +* copyright 2019 [#1606](https://github.com/GoogleContainerTools/skaffold/pull/1606) +* Remove unused constants [#1602](https://github.com/GoogleContainerTools/skaffold/pull/1602) +* Remove stopped containers in make targets [#1590](https://github.com/GoogleContainerTools/skaffold/pull/1590) +* Add missing tests for build/sequence.go [#1575](https://github.com/GoogleContainerTools/skaffold/pull/1575) +* Extract yaml used in documentation into files [#1593](https://github.com/GoogleContainerTools/skaffold/pull/1593) + +Docs updates: +* Improve comments and schema [#1652](https://github.com/GoogleContainerTools/skaffold/pull/1652) +* Add `required` tags [#1642](https://github.com/GoogleContainerTools/skaffold/pull/1642) +* Add more comments to the Config structs [#1630](https://github.com/GoogleContainerTools/skaffold/pull/1630) +* Add short docs about automatic port-forwarding [#1637](https://github.com/GoogleContainerTools/skaffold/pull/1637) +* Improve documentation 
[#1599](https://github.com/GoogleContainerTools/skaffold/pull/1599) +* Fix DEVELOPMENT.md fragment [#1576](https://github.com/GoogleContainerTools/skaffold/pull/1576) +* Improve the Skaffold.dev documentation [#1579](https://github.com/GoogleContainerTools/skaffold/pull/1579) + +Huge thanks goes out to all of our contributors for this release: + +- Balint Pato +- Brian de Alwis +- Cornelius Weig +- David Gageot +- Michael Beaumont +- Michael FIG +- Nick Kubala +- Priya Wadhwa +- Shuhei Kitagawa + + +# v0.22.0 Release - 1/31/2019 + +Note: This release comes with a config change, use `skaffold fix` to permanently upgrade your config to `v1beta4`, however old versions are now auto-upgraded. +See [deprecation-policy.md](/deprecation-policy.md) for details on what beta means. + +New features: +* Introduce configuration option to configure image pushing per kube-context [#1355](https://github.com/GoogleContainerTools/skaffold/pull/1355) +* Better support for docker build with a target [#1497](https://github.com/GoogleContainerTools/skaffold/pull/1497) +* Reintroduce the fsNotify trigger [#1562](https://github.com/GoogleContainerTools/skaffold/pull/1562) +* Add zsh completion [#1531](https://github.com/GoogleContainerTools/skaffold/pull/1531) +* `#296` Support remote helm chart repositories [#1254](https://github.com/GoogleContainerTools/skaffold/pull/1254) + +Fixes: +* Fix bug in port forwarding [#1529](https://github.com/GoogleContainerTools/skaffold/pull/1529) +* Fix doc for Kustomize deploy: path option [#1527](https://github.com/GoogleContainerTools/skaffold/pull/1527) +* Fix broken links in Getting Started [#1523](https://github.com/GoogleContainerTools/skaffold/pull/1523) +* Use configured namespace for pod watcher. 
[#1473](https://github.com/GoogleContainerTools/skaffold/pull/1473) +* Pass DOCKER* env variables for jib to connect to minikube [#1505](https://github.com/GoogleContainerTools/skaffold/pull/1505) + +Updates & Refactorings: +* Upgrade to jib 1.0.0 [#1512](https://github.com/GoogleContainerTools/skaffold/pull/1512) +* Don’t use local Docker to push Bazel images [#1493](https://github.com/GoogleContainerTools/skaffold/pull/1493) +* Use kubectl to read the manifests [#1451](https://github.com/GoogleContainerTools/skaffold/pull/1451) +* Simplify integration tests [#1539](https://github.com/GoogleContainerTools/skaffold/pull/1539) +* Fix master branch [#1569](https://github.com/GoogleContainerTools/skaffold/pull/1569) +* Add missing tests for watch/triggers [#1557](https://github.com/GoogleContainerTools/skaffold/pull/1557) +* Improve triggers [#1561](https://github.com/GoogleContainerTools/skaffold/pull/1561) +* Add tests for labels package [#1534](https://github.com/GoogleContainerTools/skaffold/pull/1534) + +Docs updates: +* Fix skaffold.dev indexing on Google [#1547](https://github.com/GoogleContainerTools/skaffold/pull/1547) +* 2019 roadmap [#1530](https://github.com/GoogleContainerTools/skaffold/pull/1530) +* Should be v1beta3 [#1515](https://github.com/GoogleContainerTools/skaffold/pull/1515) +* Renaming the CoC for GitHub [#1518](https://github.com/GoogleContainerTools/skaffold/pull/1518) +* Add Priya as a Codeowner [#1544](https://github.com/GoogleContainerTools/skaffold/pull/1544) +* Add Priya as a maintainer [#1542](https://github.com/GoogleContainerTools/skaffold/pull/1542) +* Note JVM flags specific to Java 8 in examples/jib [#1563](https://github.com/GoogleContainerTools/skaffold/pull/1563) + +Huge thanks goes out to all of our contributors for this release: + +- Balint Pato +- Brian de Alwis +- Cornelius Weig +- David Gageot +- Koen De Keyser +- Labesse Kévin +- Michael FIG +- Nick Kubala +- Priya Wadhwa +- Shuhei Kitagawa +- czhc + + +# v0.21.1 Release - 
1/22/2019 + +New Features: +* Add a log when bazel deps take a long time [#1498](https://github.com/GoogleContainerTools/skaffold/pull/1498) +* Pre-pull cache-from images [#1495](https://github.com/GoogleContainerTools/skaffold/pull/1495) +* Pass bazel args to `bazel info bazel-bin` [#1487](https://github.com/GoogleContainerTools/skaffold/pull/1487) +* Support secretGenerators with kustomize [#1488](https://github.com/GoogleContainerTools/skaffold/pull/1488) + + +Fixes: +* Fix coloured output when building in // [#1501](https://github.com/GoogleContainerTools/skaffold/pull/1501) +* Fix onbuild analysis [#1491](https://github.com/GoogleContainerTools/skaffold/pull/1491) +* Fix Broken link to references/config in documentation [#1486](https://github.com/GoogleContainerTools/skaffold/pull/1486) + + +Updates & refactorings: +* Add error for non Docker artifacts built with Kaniko [#1494](https://github.com/GoogleContainerTools/skaffold/pull/1494) +* Update bazel example [#1492](https://github.com/GoogleContainerTools/skaffold/pull/1492) +* Revert "Merge pull request #1439 from ltouati/fsnotify" [#1508](https://github.com/GoogleContainerTools/skaffold/pull/1508) +* Don’t log if nothing is copied or deleted [#1504](https://github.com/GoogleContainerTools/skaffold/pull/1504) +* Add more integration tests [#1502](https://github.com/GoogleContainerTools/skaffold/pull/1502) +* Remove file committed by error [#1500](https://github.com/GoogleContainerTools/skaffold/pull/1500) + + +Docs updates: +* Update doc around local development [#1446](https://github.com/GoogleContainerTools/skaffold/pull/1446) +* [doc] Fix default value for manifests [#1485](https://github.com/GoogleContainerTools/skaffold/pull/1485) + +Huge thanks goes out to all of our contributors for this release: + +- David Gageot +- Nick Kubala +- Priya Wadhwa +- Shane Lee + + +# v0.21.0 Release - 1/17/2019 + +Note: This release comes with a config change, use `skaffold fix` to permanently upgrade your config to 
`v1beta2`, however old versions are now auto-upgraded. +See [deprecation-policy.md](/deprecation-policy.md) for details on what beta means. + +New Features: +* Add support for urls in deploy.kubectl.manifests [#1408](https://github.com/GoogleContainerTools/skaffold/pull/1408) +* Add some tests for Sync [#1406](https://github.com/GoogleContainerTools/skaffold/pull/1406) +* Get digest on push and imageID on build [#1428](https://github.com/GoogleContainerTools/skaffold/pull/1428) +* Implement a notification based watcher [#1439](https://github.com/GoogleContainerTools/skaffold/pull/1439) +* Add k8s version check to kustomize deployer [#1449](https://github.com/GoogleContainerTools/skaffold/pull/1449) +* Support new K8s context name in Docker Desktop [#1448](https://github.com/GoogleContainerTools/skaffold/pull/1448) +* Upload sources for any kind of artifact [#1477](https://github.com/GoogleContainerTools/skaffold/pull/1477) +* feat(docker creds) can mount docker config into kaniko pod [#1466](https://github.com/GoogleContainerTools/skaffold/pull/1466) +* Support Jib on Google Cloud Build [#1478](https://github.com/GoogleContainerTools/skaffold/pull/1478) + + +Fixes: +* fix search URL for skaffold.dev + github edit link [#1417](https://github.com/GoogleContainerTools/skaffold/pull/1417) +* Print error messages when containers can’t be started [#1415](https://github.com/GoogleContainerTools/skaffold/pull/1415) +* Script should be executable [#1423](https://github.com/GoogleContainerTools/skaffold/pull/1423) +* Fix port-forwarding not being triggered. 
[#1433](https://github.com/GoogleContainerTools/skaffold/pull/1433) +* Fix localDir context for Kaniko on Windows [#1438](https://github.com/GoogleContainerTools/skaffold/pull/1438) +* Remove spurious warning [#1442](https://github.com/GoogleContainerTools/skaffold/pull/1442) +* Test what was actually deployed [#1462](https://github.com/GoogleContainerTools/skaffold/pull/1462) +* Fix jib tagging [#1475](https://github.com/GoogleContainerTools/skaffold/pull/1475) + + +Updates & refactorings: +* Move trigger related code to the watcher [#1422](https://github.com/GoogleContainerTools/skaffold/pull/1422) +* Simplify fake docker api [#1424](https://github.com/GoogleContainerTools/skaffold/pull/1424) +* Small improvements gcb [#1425](https://github.com/GoogleContainerTools/skaffold/pull/1425) +* Small improvements to kaniko builder [#1426](https://github.com/GoogleContainerTools/skaffold/pull/1426) +* Update golangci lint [#1430](https://github.com/GoogleContainerTools/skaffold/pull/1430) +* Refactor docker api [#1429](https://github.com/GoogleContainerTools/skaffold/pull/1429) +* Use latest release of Jib [#1440](https://github.com/GoogleContainerTools/skaffold/pull/1440) +* Refactor FakeCmd [#1456](https://github.com/GoogleContainerTools/skaffold/pull/1456) +* Use cmd.Run() indirection [#1457](https://github.com/GoogleContainerTools/skaffold/pull/1457) +* Clear error message for unsupported artifact on GCB [#1453](https://github.com/GoogleContainerTools/skaffold/pull/1453) +* Improve port-forwarding [#1452](https://github.com/GoogleContainerTools/skaffold/pull/1452) +* Minor changes to kaniko builder [#1461](https://github.com/GoogleContainerTools/skaffold/pull/1461) +* Show duplication in jib code [#1454](https://github.com/GoogleContainerTools/skaffold/pull/1454) +* Remove some duplication in Jib builder [#1465](https://github.com/GoogleContainerTools/skaffold/pull/1465) +* Use Maven wrapper for Jib example easier start. 
[#1471](https://github.com/GoogleContainerTools/skaffold/pull/1471) +* Simplify docker.AddTag() [#1464](https://github.com/GoogleContainerTools/skaffold/pull/1464) +* Embed labelling into Deployers [#1463](https://github.com/GoogleContainerTools/skaffold/pull/1463) +* Refactor port forwarding [#1474](https://github.com/GoogleContainerTools/skaffold/pull/1474) + + +Docs updates: +* CLI reference docs automation [#1418](https://github.com/GoogleContainerTools/skaffold/pull/1418) +* installation link to readme [#1437](https://github.com/GoogleContainerTools/skaffold/pull/1437) +* docs: typo + add setValueTemplates usecase [#1450](https://github.com/GoogleContainerTools/ +* fix(docs) updated references for imageName to be image [#1468](https://github.com/GoogleContainerTools/skaffold/pull/1468) +* More fixes to the builders doc [#1469](https://github.com/GoogleContainerTools/skaffold/pull/1469) +* fix: correct spelling of Kaninko to Kaniko [#1472](https://github.com/GoogleContainerTools/skaffold/pull/1472) + +Huge thank you for this release towards our contributors: + +- Balint Pato +- Bruno Miguel Custodio +- Cedric Kring +- David Gageot +- Gareth Evans +- George Oakling +- Ivan Portyankin +- Lionel Touati +- Matt Rickard +- Matti Paksula +- Nick Kubala +- Priya Wadhwa + + # v0.20.0 Release - 12/21/2018 Note: This release comes with a config change, use `skaffold fix` to permanently upgrade your config to `v1beta2`, however old versions are now auto-upgraded. diff --git a/code-of-conduct.md b/CODE_OF_CONDUCT.md similarity index 100% rename from code-of-conduct.md rename to CODE_OF_CONDUCT.md diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 399058110ba..0ad6b4fb6c7 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -79,7 +79,7 @@ To build with your local changes you have two options: If you are iterating on skaffold and want to see your changes in action, you can: -1. [Build skaffold](#building-and-running-skaffold) +1. [Build skaffold](#building-skaffold) 2. 
[Use the quickstart example](README.md#iterative-development) ## Testing skaffold diff --git a/Gopkg.lock b/Gopkg.lock index 0e30ffc0c63..b0f4bc1e6d2 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -235,6 +235,25 @@ revision = "1615341f118ae12f353cc8a983f35b584342c9b3" version = "v1.12.0" +[[projects]] + digest = "1:053c8b82c44fc384cca33724d4359235fa98e0cb8b135c392d8e14f396a2a7e8" + name = "github.com/gdamore/encoding" + packages = ["."] + pruneopts = "NUT" + revision = "6289cdc94c00ac4aa177771c5fce7af2f96b626d" + version = "v1.0.0" + +[[projects]] + digest = "1:09b6d7ce76747ac52e2122e15eff2965e47f537f9ddf5e62acc2d79007ce9e07" + name = "github.com/gdamore/tcell" + packages = [ + ".", + "terminfo", + ] + pruneopts = "NUT" + revision = "aaadc574a6ed8dc3abe56036ca130dcee1ee6b6e" + version = "v1.1.1" + [[projects]] digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" name = "github.com/ghodss/yaml" @@ -300,7 +319,7 @@ [[projects]] branch = "master" - digest = "1:900b120797732528f188d4cb7485d0a33f40e2867fa7f9086494966ea2203bac" + digest = "1:6999705ae5d573f1b6f933d280f5fc2d53448de149c6b2d5710b680dcd7c3ed4" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -310,6 +329,7 @@ "pkg/v1/remote", "pkg/v1/remote/transport", "pkg/v1/stream", + "pkg/v1/tarball", "pkg/v1/types", "pkg/v1/v1util", ] @@ -403,6 +423,33 @@ pruneopts = "NUT" revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" +[[projects]] + branch = "master" + digest = "1:26159b03ea04b155cb70d071c32bf599f0519548f8724be87df85fe8fd0a33a3" + name = "github.com/hashicorp/go-hclog" + packages = ["."] + pruneopts = "NUT" + revision = "4783caec6f2e5cdd47fab8b2bb47ce2ce5c546b7" + +[[projects]] + branch = "master" + digest = "1:8c38df748ea4cf82f8a5185eb3e6292a1047b76c90c8c8e8b924451e043ba7e4" + name = "github.com/hashicorp/go-plugin" + packages = [ + ".", + "internal/proto", + ] + pruneopts = "NUT" + revision = "362c99b11937c6a84686ee5726a8170e921ab406" + +[[projects]] + branch = 
"master" + digest = "1:8deb0c5545c824dfeb0ac77ab8eb67a3d541eab76df5c85ce93064ef02d44cd0" + name = "github.com/hashicorp/yamux" + packages = ["."] + pruneopts = "NUT" + revision = "2f1d1f20f75d5404f53b9edf6b53ed5505508675" + [[projects]] digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" name = "github.com/imdario/mergo" @@ -459,6 +506,22 @@ revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" version = "v1.0.1" +[[projects]] + digest = "1:27885e80e300ed44b6ccaa01b4db40e155e374ed12410ac1aa8f1fe4cc69b2bf" + name = "github.com/krishicks/yaml-patch" + packages = ["."] + pruneopts = "NUT" + revision = "83cc9ac50becbbfafb86a89167f3bc5372e8e530" + version = "v0.0.10" + +[[projects]] + digest = "1:ed9924c64c12bb89cc6fe944985904f9c572a4ce38d9ad132716ef8c68237237" + name = "github.com/lucasb-eyer/go-colorful" + packages = ["."] + pruneopts = "NUT" + revision = "345fbb3dbcdb252d9985ee899a84963c0fa24c82" + version = "v1.0" + [[projects]] digest = "1:08c231ec84231a7e23d67e4b58f975e1423695a32467a362ee55a803f9de8061" name = "github.com/mattn/go-colorable" @@ -475,6 +538,14 @@ revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" version = "v0.0.4" +[[projects]] + digest = "1:70ef8268170621826f8c111ac1674afe75136f526f449453b4a6631c6dba1946" + name = "github.com/mattn/go-runewidth" + packages = ["."] + pruneopts = "NUT" + revision = "3ee7d812e62a0804a7d0a324e0249ca2db3476d3" + version = "v0.0.4" + [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -499,6 +570,14 @@ revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" version = "v1.0.0" +[[projects]] + digest = "1:18b773b92ac82a451c1276bd2776c1e55ce057ee202691ab33c8d6690efcc048" + name = "github.com/mitchellh/go-testing-interface" + packages = ["."] + pruneopts = "NUT" + revision = "6d0b8010fcc857872e42fc6c931227569016843c" + version = "v1.0.0" + [[projects]] digest = 
"1:5b4289f0f483890552edcb868d9834ff96ffdb68847fa82ec45764cc4a83d8b8" name = "github.com/moby/buildkit" @@ -527,6 +606,14 @@ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" +[[projects]] + digest = "1:3b517122f3aad1ecce45a630ea912b3092b4729f25532a911d0cb2935a1f9352" + name = "github.com/oklog/run" + packages = ["."] + pruneopts = "NUT" + revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39" + version = "v1.0.0" + [[projects]] digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0" name = "github.com/opencontainers/go-digest" @@ -631,6 +718,14 @@ pruneopts = "NUT" revision = "619930b0b4713cc1280189bf0a4c54b3fb506f60" +[[projects]] + branch = "master" + digest = "1:d8cda3d9486dbc1ed442e36b9cbe69df4500af24f33fca217998e198d22b5bd7" + name = "github.com/rivo/tview" + packages = ["."] + pruneopts = "NUT" + revision = "84fdb36408f35b8998d48d323f55f40b7f14a19b" + [[projects]] digest = "1:b0e0e2abf5c70fd0f7f6c053c6c99c6960149146e40d5c7547cacc176e5d9973" name = "github.com/rjeczalik/notify" @@ -647,6 +742,14 @@ revision = "1744e2970ca51c86172c8190fadad617561ed6e7" version = "v1.0.0" +[[projects]] + digest = "1:debf1a119378d059b68925f1796851b6855bfc2f55419a50d634ecce3eabd8e8" + name = "github.com/shurcooL/sanitized_anchor_name" + packages = ["."] + pruneopts = "NUT" + revision = "7bfe4c7ecddb3666a94b053b422cdd8f5aaa3615" + version = "v1.0.0" + [[projects]] digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406" name = "github.com/sirupsen/logrus" @@ -771,11 +874,13 @@ revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31" [[projects]] - digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" + digest = "1:abee7d95958e649b0319b44f4964e55db9fa4098cef5b27e829143421e9a2376" name = "golang.org/x/text" packages = [ "collate", "collate/build", + "encoding", + "encoding/internal/identifier", "internal/colltab", "internal/gen", "internal/tag", @@ -851,7 +956,7 @@ revision = 
"31ac5d88444a9e7ad18077db9a165d793ad06a2e" [[projects]] - digest = "1:2f91d3e11b666570f8c923912f1cc8cf2f0c6b7371b2687ee67a8f73f08c6272" + digest = "1:f112c6f3e9bbd16eb113db8f37adb6bd10270142079d00c7ac625534cbb16475" name = "google.golang.org/grpc" packages = [ ".", @@ -864,6 +969,8 @@ "encoding", "encoding/proto", "grpclog", + "health", + "health/grpc_health_v1", "internal", "internal/backoff", "internal/channelz", @@ -904,6 +1011,14 @@ revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" +[[projects]] + digest = "1:926cba3a3c1cddfcfad996917e5d72fb4726536dbdc7264fa936e8c99890e558" + name = "gopkg.in/russross/blackfriday.v2" + packages = ["."] + pruneopts = "NUT" + revision = "d3b5b032dc8e8927d31a5071b56e14c89f045135" + version = "v2.0.1" + [[projects]] digest = "1:1cf1388ec8c73b7ecc711d9f279ab631ea0a6964d1ccc32809a6be90c33fa2a0" name = "gopkg.in/src-d/go-billy.v4" @@ -1204,14 +1319,17 @@ "github.com/google/go-containerregistry/pkg/name", "github.com/google/go-containerregistry/pkg/v1", "github.com/google/go-containerregistry/pkg/v1/remote", - "github.com/google/go-containerregistry/pkg/v1/remote/transport", + "github.com/google/go-containerregistry/pkg/v1/tarball", "github.com/google/go-github/github", + "github.com/hashicorp/go-plugin", "github.com/karrick/godirwalk", + "github.com/krishicks/yaml-patch", "github.com/mitchellh/go-homedir", "github.com/moby/buildkit/frontend/dockerfile/command", "github.com/moby/buildkit/frontend/dockerfile/parser", "github.com/moby/buildkit/frontend/dockerfile/shell", "github.com/pkg/errors", + "github.com/rivo/tview", "github.com/rjeczalik/notify", "github.com/sirupsen/logrus", "github.com/spf13/cobra", @@ -1219,17 +1337,16 @@ "golang.org/x/crypto/ssh/terminal", "golang.org/x/oauth2", "golang.org/x/oauth2/google", - "golang.org/x/sync/errgroup", "google.golang.org/api/cloudbuild/v1", "google.golang.org/api/googleapi", "google.golang.org/api/iterator", "gopkg.in/AlecAivazis/survey.v1", + 
"gopkg.in/russross/blackfriday.v2", "gopkg.in/src-d/go-git.v4", "gopkg.in/src-d/go-git.v4/plumbing", "gopkg.in/src-d/go-git.v4/plumbing/object", "gopkg.in/yaml.v2", "k8s.io/api/apps/v1", - "k8s.io/api/batch/v1", "k8s.io/api/core/v1", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/api/meta", diff --git a/MAINTAINERS b/MAINTAINERS index 93d76c09d01..26247bd880a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3,4 +3,5 @@ Dan Lorenc David Gageot Matt Rickard Nick Kubala +Priya Wadhwa Vic Iglesias diff --git a/Makefile b/Makefile index de9b989cb8f..46823689645 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,6 +48,7 @@ GO_LDFLAGS += -X $(VERSION_PACKAGE).gitTreeState=$(if $(shell git status --porce GO_LDFLAGS +=" GO_FILES := $(shell find . -type f -name '*.go' -not -path "./vendor/*") +GO_BUILD_TAGS := "kqueue" DOCSY_COMMIT:=a7141a2eac26cb598b707cab87d224f9105c315d @@ -55,7 +56,7 @@ $(BUILD_DIR)/$(PROJECT): $(BUILD_DIR)/$(PROJECT)-$(GOOS)-$(GOARCH) cp $(BUILD_DIR)/$(PROJECT)-$(GOOS)-$(GOARCH) $@ $(BUILD_DIR)/$(PROJECT)-%-$(GOARCH): $(GO_FILES) $(BUILD_DIR) - GOOS=$* GOARCH=$(GOARCH) CGO_ENABLED=1 go build -ldflags $(GO_LDFLAGS) -gcflags $(GO_GCFLAGS) -asmflags $(GO_ASMFLAGS) -o $@ $(BUILD_PACKAGE) + GOOS=$* GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -gcflags $(GO_GCFLAGS) -asmflags $(GO_ASMFLAGS) -tags $(GO_BUILD_TAGS) -o $@ $(BUILD_PACKAGE) %.sha256: % shasum -a 256 $< > $@ @@ -81,7 +82,7 @@ test: .PHONY: install install: $(GO_FILES) $(BUILD_DIR) - GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=1 go install -ldflags $(GO_LDFLAGS) -gcflags $(GO_GCFLAGS) -asmflags $(GO_ASMFLAGS) $(BUILD_PACKAGE) + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go install -ldflags $(GO_LDFLAGS) -gcflags $(GO_GCFLAGS) -asmflags $(GO_ASMFLAGS) 
-tags $(GO_BUILD_TAGS) $(BUILD_PACKAGE) .PHONY: integration integration: install $(BUILD_DIR)/$(PROJECT) @@ -109,7 +110,7 @@ release-in-docker: -t gcr.io/$(GCP_PROJECT)/skaffold-builder \ --target builder \ . - docker run \ + docker run --rm \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(HOME)/.config/gcloud:/root/.config/gcloud \ gcr.io/$(GCP_PROJECT)/skaffold-builder make -j release VERSION=$(VERSION) RELEASE_BUCKET=$(RELEASE_BUCKET) GCP_PROJECT=$(GCP_PROJECT) @@ -131,7 +132,7 @@ release-build-in-docker: -t gcr.io/$(GCP_PROJECT)/skaffold-builder \ --target builder \ . - docker run \ + docker run --rm \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(HOME)/.config/gcloud:/root/.config/gcloud \ gcr.io/$(GCP_PROJECT)/skaffold-builder make -j release-build RELEASE_BUCKET=$(RELEASE_BUCKET) GCP_PROJECT=$(GCP_PROJECT) @@ -146,7 +147,7 @@ integration-in-docker: -f deploy/skaffold/Dockerfile \ --target integration \ -t gcr.io/$(GCP_PROJECT)/skaffold-integration . - docker run \ + docker run --rm \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(HOME)/.config/gcloud:/root/.config/gcloud \ -v $(GOOGLE_APPLICATION_CREDENTIALS):$(GOOGLE_APPLICATION_CREDENTIALS) \ @@ -170,7 +171,7 @@ submit-release-trigger: #utilities for skaffold site - not used anywhere else .PHONY: docs-controller-image -docs-controller-image: +docs-controller-image: docker build -t gcr.io/$(GCP_PROJECT)/docs-controller -f deploy/webhook/Dockerfile . 
@@ -183,12 +184,18 @@ docs-preview-image: .PHONY: start-docs-preview start-docs-preview: docs-preview-image - docker run -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/preview.sh + docker run --rm -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/preview.sh .PHONY: build-docs-preview build-docs-preview: docs-preview-image - docker run -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/build.sh + docker run --rm -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/build.sh .PHONY: clean-docs-preview clean-docs-preview: docs-preview-image - docker run -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/clean.sh + docker run --rm -ti -v $(PWD):/app --workdir /app/ -p 1313:1313 skaffold-docs-previewer bash -xc deploy/docs/clean.sh + +# schema generation + +.PHONY: generate-schemas +generate-schemas: + go run hack/schemas/main.go diff --git a/README.md b/README.md index 07ca5d62f2c..6c237b72b80 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ stages into one simple command. Every time you run `skaffold dev`, the system 1. Cleans up deployed artifacts on exit (Ctrl+C) What's more, the pluggable architecture is central to Skaffold's design, allowing you to use -the tool you prefer in each stage. Also, skaffold's `profiles` feature grants +the tool you prefer in each stage. Also, Skaffold's `profiles` feature grants you the freedom to switch tools as you see fit depending on the context. 
For example, if you are coding on a local machine, you can configure Skaffold to build artifacts diff --git a/cmd/skaffold/app/cmd/build.go b/cmd/skaffold/app/cmd/build.go index af6818045c1..354f4ee0244 100644 --- a/cmd/skaffold/app/cmd/build.go +++ b/cmd/skaffold/app/cmd/build.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ import ( "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/flags" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -39,10 +40,12 @@ func NewCmdBuild(out io.Writer) *cobra.Command { Short: "Builds the artifacts", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { + opts.Command = "build" return runBuild(out) }, } AddRunDevFlags(cmd) + cmd.Flags().StringArrayVarP(&opts.TargetImages, "build-image", "b", nil, "Choose which artifacts to build. Artifacts with image names that contain the expression will be built only. 
Default is to build sources for all artifacts") cmd.Flags().BoolVarP(&quietFlag, "quiet", "q", false, "Suppress the build output and print image built on success") cmd.Flags().VarP(buildFormatFlag, "output", "o", buildFormatFlag.Usage()) return cmd @@ -68,7 +71,14 @@ func runBuild(out io.Writer) error { buildOut = ioutil.Discard } - bRes, err := runner.BuildAndTest(ctx, buildOut, config.Build.Artifacts) + var targetArtifacts []*latest.Artifact + for _, artifact := range config.Build.Artifacts { + if runner.IsTargetImage(artifact) { + targetArtifacts = append(targetArtifacts, artifact) + } + } + + bRes, err := runner.BuildAndTest(ctx, buildOut, targetArtifacts) if err != nil { return err } diff --git a/cmd/skaffold/app/cmd/cmd.go b/cmd/skaffold/app/cmd/cmd.go index 75381195ca6..b0dff825f03 100644 --- a/cmd/skaffold/app/cmd/cmd.go +++ b/cmd/skaffold/app/cmd/cmd.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ limitations under the License. 
package cmd import ( - "context" "fmt" "io" "os" "strings" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/update" @@ -34,9 +34,10 @@ import ( ) var ( - opts = &config.SkaffoldOptions{} - v string - overwrite bool + opts = &config.SkaffoldOptions{} + v string + defaultColor int + overwrite bool updateMsg = make(chan string) ) @@ -58,6 +59,8 @@ func NewSkaffoldCommand(out, err io.Writer) *cobra.Command { logrus.Infof("update check failed: %s", err) } }() + + color.OverwriteDefault(color.Color(defaultColor)) return nil } @@ -83,6 +86,7 @@ func NewSkaffoldCommand(out, err io.Writer) *cobra.Command { rootCmd.AddCommand(NewCmdDiagnose(out)) rootCmd.PersistentFlags().StringVarP(&v, "verbosity", "v", constants.DefaultLogLevel.String(), "Log level (debug, info, warn, error, fatal, panic)") + rootCmd.PersistentFlags().IntVar(&defaultColor, "color", int(color.Default), "Specify the default output color in ANSI escape codes") setFlagsFromEnvVariables(rootCmd.Commands()) @@ -98,16 +102,12 @@ func updateCheck(ch chan string) error { logrus.Debugf("Update check not enabled, skipping.") return nil } - current, err := version.ParseVersion(version.Get().Version) - if err != nil { - return errors.Wrap(err, "parsing current semver, skipping update check") - } - latest, err := update.GetLatestVersion(context.Background()) + latest, current, err := update.GetLatestAndCurrentVersion() if err != nil { - return errors.Wrap(err, "getting latest version") + return errors.Wrap(err, "get latest and current Skaffold version") } if latest.GT(current) { - ch <- fmt.Sprintf("There is a new version (%s) of skaffold available. Download it at %s\n", latest, constants.LatestDownloadURL) + ch <- fmt.Sprintf("There is a new version (%s) of Skaffold available. 
Download it at %s\n", latest, constants.LatestDownloadURL) } return nil } diff --git a/cmd/skaffold/app/cmd/cmd_test.go b/cmd/skaffold/app/cmd/cmd_test.go index 4fae63b1016..80dfb5227ec 100644 --- a/cmd/skaffold/app/cmd/cmd_test.go +++ b/cmd/skaffold/app/cmd/cmd_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/cmd/completion.go b/cmd/skaffold/app/cmd/completion.go index d899c091378..33f8830dea7 100644 --- a/cmd/skaffold/app/cmd/completion.go +++ b/cmd/skaffold/app/cmd/completion.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,30 +24,46 @@ import ( "github.com/spf13/cobra" ) -// completionCmd represents the completion command +const longDescription = ` + Outputs shell completion for the given shell (bash or zsh) + + This depends on the bash-completion binary. Example installation instructions: + OS X: + $ brew install bash-completion + $ source $(brew --prefix)/etc/bash_completion + $ skaffold completion bash > ~/.skaffold-completion # for bash users + $ skaffold completion zsh > ~/.skaffold-completion # for zsh users + $ source ~/.skaffold-completion + Ubuntu: + $ apt-get install bash-completion + $ source /etc/bash-completion + $ source <(skaffold completion bash) # for bash users + $ source <(skaffold completion zsh) # for zsh users + + Additionally, you may want to output the completion to a file and source in your .bashrc +` + var completionCmd = &cobra.Command{ - // Only bash is supported for now. 
However, having args after - // "completion" will help when supporting multiple shells - Use: "completion bash", + Use: "completion SHELL", Args: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("requires 1 arg, found %d", len(args)) } return cobra.OnlyValidArgs(cmd, args) }, - ValidArgs: []string{"bash"}, - Short: "Output command completion script for the bash shell", - Long: `To enable command completion run - -eval "$(skaffold completion bash)" - -To configure bash shell completion for all your sessions, add the following to your -~/.bashrc or ~/.bash_profile: + ValidArgs: []string{"bash", "zsh"}, + Short: "Output shell completion for the given shell (bash or zsh)", + Long: longDescription, + Run: completion, +} -eval "$(skaffold completion bash)"`, - Run: func(cmd *cobra.Command, args []string) { +func completion(_cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": rootCmd.GenBashCompletion(os.Stdout) - }, + case "zsh": + rootCmd.GenZshCompletion(os.Stdout) + } } // NewCmdCompletion returns the cobra command that outputs shell completion code diff --git a/cmd/skaffold/app/cmd/config.go b/cmd/skaffold/app/cmd/config.go index c7ac119b8df..abaf96eebc1 100644 --- a/cmd/skaffold/app/cmd/config.go +++ b/cmd/skaffold/app/cmd/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ import ( func NewCmdConfig(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "config", - Short: "A set of commands for interacting with the skaffold config.", + Short: "A set of commands for interacting with the Skaffold config.", } cmd.AddCommand(config.NewCmdSet(out)) diff --git a/cmd/skaffold/app/cmd/config/config.go b/cmd/skaffold/app/cmd/config/config.go index f112587ee61..ecefb5e89e6 100644 --- a/cmd/skaffold/app/cmd/config/config.go +++ b/cmd/skaffold/app/cmd/config/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ type Config struct { // ContextConfig is the context-specific config information provided in // the global Skaffold config. type ContextConfig struct { - Kubecontext string `yaml:"kube-context,omitempty"` - DefaultRepo string `yaml:"default-repo,omitempty"` + Kubecontext string `yaml:"kube-context,omitempty"` + DefaultRepo string `yaml:"default-repo,omitempty"` + LocalCluster *bool `yaml:"local-cluster,omitempty"` } diff --git a/cmd/skaffold/app/cmd/config/config_test.go b/cmd/skaffold/app/cmd/config/config_test.go index 527c04015a7..aa0300d0dad 100644 --- a/cmd/skaffold/app/cmd/config/config_test.go +++ b/cmd/skaffold/app/cmd/config/config_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,6 +19,7 @@ package config import ( "testing" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/GoogleContainerTools/skaffold/testutil" yaml "gopkg.in/yaml.v2" ) @@ -98,6 +99,40 @@ func TestSetAndUnsetConfig(t *testing.T) { }, }, }, + { + name: "set local cluster", + key: "local-cluster", + value: "false", + kubecontext: "this_is_a_context", + expectedSetCfg: &Config{ + ContextConfigs: []*ContextConfig{ + { + Kubecontext: "this_is_a_context", + LocalCluster: util.BoolPtr(false), + }, + }, + }, + expectedUnsetCfg: &Config{ + ContextConfigs: []*ContextConfig{ + { + Kubecontext: "this_is_a_context", + }, + }, + }, + }, + { + name: "set invalid local cluster", + key: "local-cluster", + shouldErrSet: true, + value: "not-a-bool", + expectedSetCfg: &Config{ + ContextConfigs: []*ContextConfig{ + { + Kubecontext: dummyContext, + }, + }, + }, + }, { name: "set fake value", key: "not_a_real_value", @@ -126,6 +161,22 @@ func TestSetAndUnsetConfig(t *testing.T) { ContextConfigs: []*ContextConfig{}, }, }, + { + name: "set global local cluster", + key: "local-cluster", + value: "true", + global: true, + expectedSetCfg: &Config{ + Global: &ContextConfig{ + LocalCluster: util.BoolPtr(true), + }, + ContextConfigs: []*ContextConfig{}, + }, + expectedUnsetCfg: &Config{ + Global: &ContextConfig{}, + ContextConfigs: []*ContextConfig{}, + }, + }, } for _, test := range tests { diff --git a/cmd/skaffold/app/cmd/config/flags.go b/cmd/skaffold/app/cmd/config/flags.go index 2a974426965..0e470541196 100644 --- a/cmd/skaffold/app/cmd/config/flags.go +++ b/cmd/skaffold/app/cmd/config/flags.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/cmd/skaffold/app/cmd/config/list.go b/cmd/skaffold/app/cmd/config/list.go index 7aca39efa66..1f5a6d64a50 100644 --- a/cmd/skaffold/app/cmd/config/list.go +++ b/cmd/skaffold/app/cmd/config/list.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -28,7 +28,7 @@ import ( func NewCmdList(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "list", - Short: "List all values set in the global skaffold config", + Short: "List all values set in the global Skaffold config", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { return runList(out) diff --git a/cmd/skaffold/app/cmd/config/set.go b/cmd/skaffold/app/cmd/config/set.go index 5595541f09b..f600ad0c12e 100644 --- a/cmd/skaffold/app/cmd/config/set.go +++ b/cmd/skaffold/app/cmd/config/set.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ import ( "io" "io/ioutil" "reflect" + "strconv" "strings" "github.com/pkg/errors" @@ -31,7 +32,7 @@ import ( func NewCmdSet(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "set", - Short: "Set a value in the global skaffold config", + Short: "Set a value in the global Skaffold config", Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { if err := setConfigValue(args[0], args[1]); err != nil { @@ -46,12 +47,29 @@ func NewCmdSet(out io.Writer) *cobra.Command { return cmd } -func setConfigValue(name string, value interface{}) error { +func setConfigValue(name string, value string) error { cfg, err := getOrCreateConfigForKubectx() if err != nil { return err } + fieldName := getFieldName(cfg, name) + if fieldName == "" { + return fmt.Errorf("%s is not a valid config field", name) + } + + field := reflect.Indirect(reflect.ValueOf(cfg)).FieldByName(fieldName) + val, err := parseAsType(value, field.Type()) + if err != nil { + return fmt.Errorf("%s is not a valid value for field %s", value, name) + } + + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).Set(val) + + return writeConfig(cfg) +} + +func getFieldName(cfg *ContextConfig, name string) string { cfgValue := reflect.Indirect(reflect.ValueOf(cfg)) var fieldName string for i := 0; i < cfgValue.NumField(); i++ { @@ -62,20 +80,25 @@ func setConfigValue(name string, value interface{}) error { } } } - if fieldName == "" { - return fmt.Errorf("%s is not a valid config field", name) - } - fieldValue := cfgValue.FieldByName(fieldName) - - fieldType := fieldValue.Type() - val := reflect.ValueOf(value) + return fieldName +} - if fieldType != val.Type() { - return fmt.Errorf("%s is not a valid value for field %s", value, fieldName) +func parseAsType(value string, fieldType reflect.Type) (reflect.Value, error) { + switch fieldType.String() { + case "string": + return reflect.ValueOf(value), nil + case "*bool": + if value == "" { + return reflect.Zero(fieldType), nil + } 
+ valBase, err := strconv.ParseBool(value) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(&valBase), nil + default: + return reflect.Value{}, fmt.Errorf("unsupported type: %s", fieldType) } - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).Set(val) - - return writeConfig(cfg) } func writeConfig(cfg *ContextConfig) error { diff --git a/cmd/skaffold/app/cmd/config/unset.go b/cmd/skaffold/app/cmd/config/unset.go index 206ca06caa3..4bc3783d964 100644 --- a/cmd/skaffold/app/cmd/config/unset.go +++ b/cmd/skaffold/app/cmd/config/unset.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import ( func NewCmdUnset(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "unset", - Short: "Unset a value in the global skaffold config", + Short: "Unset a value in the global Skaffold config", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { resolveKubectlContext() diff --git a/cmd/skaffold/app/cmd/config/util.go b/cmd/skaffold/app/cmd/config/util.go index 1bf4211b446..f911a29997a 100644 --- a/cmd/skaffold/app/cmd/config/util.go +++ b/cmd/skaffold/app/cmd/config/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,6 +20,7 @@ import ( "io/ioutil" "path/filepath" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" @@ -172,3 +173,34 @@ func GetDefaultRepo(cliValue string) (string, error) { return defaultRepo, nil } + +func GetLocalCluster() (bool, error) { + cfg, err := GetConfigForKubectx() + localCluster := isDefaultLocal(kubecontext) + if err != nil { + return localCluster, errors.Wrap(err, "retrieving global config") + } + + if cfg != nil { + if cfg.LocalCluster != nil { + localCluster = *cfg.LocalCluster + } + } else { + // if no value is set for this cluster, fall back to the global setting + globalCfg, err := GetGlobalConfig() + if err != nil { + return localCluster, errors.Wrap(err, "retrieving global config") + } + if globalCfg != nil && globalCfg.LocalCluster != nil { + localCluster = *globalCfg.LocalCluster + } + } + + return localCluster, nil +} + +func isDefaultLocal(kubeContext string) bool { + return kubeContext == constants.DefaultMinikubeContext || + kubeContext == constants.DefaultDockerForDesktopContext || + kubeContext == constants.DefaultDockerDesktopContext +} diff --git a/cmd/skaffold/app/cmd/delete.go b/cmd/skaffold/app/cmd/delete.go index 0dffd39f0a8..b0662d962cc 100644 --- a/cmd/skaffold/app/cmd/delete.go +++ b/cmd/skaffold/app/cmd/delete.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -31,6 +31,7 @@ func NewCmdDelete(out io.Writer) *cobra.Command { Short: "Delete the deployed resources", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { + opts.Command = "delete" return delete(out) }, } diff --git a/cmd/skaffold/app/cmd/deploy.go b/cmd/skaffold/app/cmd/deploy.go index d74e7d7d3af..303d7323b78 100644 --- a/cmd/skaffold/app/cmd/deploy.go +++ b/cmd/skaffold/app/cmd/deploy.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,6 +30,7 @@ func NewCmdDeploy(out io.Writer) *cobra.Command { Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { // Same actions as `skaffold run`, but with pre-built images. + opts.Command = "deploy" return run(out) }, } diff --git a/cmd/skaffold/app/cmd/dev.go b/cmd/skaffold/app/cmd/dev.go index 1ba30318222..b7fc65ed021 100644 --- a/cmd/skaffold/app/cmd/dev.go +++ b/cmd/skaffold/app/cmd/dev.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,9 +19,13 @@ package cmd import ( "context" "io" + "strings" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/runner" "github.com/pkg/errors" + "github.com/rivo/tview" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -33,24 +37,29 @@ func NewCmdDev(out io.Writer) *cobra.Command { Short: "Runs a pipeline file in development mode", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - return dev(out) + opts.Command = "dev" + return dev(out, opts.ExperimentalGUI) }, } AddRunDevFlags(cmd) cmd.Flags().BoolVar(&opts.TailDev, "tail", true, "Stream logs from deployed objects") cmd.Flags().StringVar(&opts.Trigger, "trigger", "polling", "How are changes detected? (polling, manual or notify)") cmd.Flags().BoolVar(&opts.Cleanup, "cleanup", true, "Delete deployments after dev mode is interrupted") - cmd.Flags().StringArrayVarP(&opts.Watch, "watch-image", "w", nil, "Choose which artifacts to watch. Artifacts with image names that contain the expression will be watched only. Default is to watch sources for all artifacts") + cmd.Flags().StringArrayVarP(&opts.TargetImages, "watch-image", "w", nil, "Choose which artifacts to watch. Artifacts with image names that contain the expression will be watched only. Default is to watch sources for all artifacts") cmd.Flags().IntVarP(&opts.WatchPollInterval, "watch-poll-interval", "i", 1000, "Interval (in ms) between two checks for file changes") cmd.Flags().BoolVar(&opts.PortForward, "port-forward", true, "Port-forward exposed container ports within pods") cmd.Flags().StringArrayVarP(&opts.CustomLabels, "label", "l", nil, "Add custom labels to deployed objects. 
Set multiple times for multiple labels") + cmd.Flags().BoolVar(&opts.ExperimentalGUI, "experimental-gui", false, "Experimental Graphical User Interface") + return cmd } -func dev(out io.Writer) error { +func dev(out io.Writer, ui bool) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - catchCtrlC(cancel) + if !ui { + catchCtrlC(cancel) + } cleanup := func() {} if opts.Cleanup { @@ -59,6 +68,25 @@ func dev(out io.Writer) error { }() } + var ( + app *tview.Application + output *config.Output + ) + if ui { + app, output = createApp() + defer app.Stop() + + go func() { + app.Run() + cancel() + }() + } else { + output = &config.Output{ + Main: out, + Logs: out, + } + } + for { select { case <-ctx.Done(): @@ -69,7 +97,7 @@ func dev(out io.Writer) error { return errors.Wrap(err, "creating runner") } - err = r.Dev(ctx, out, config.Build.Artifacts) + err = r.Dev(ctx, output, config.Build.Artifacts) if r.HasDeployed() { cleanup = func() { if err := r.Cleanup(context.Background(), out); err != nil { @@ -85,3 +113,78 @@ func dev(out io.Writer) error { } } } + +func createApp() (*tview.Application, *config.Output) { + app := tview.NewApplication() + + mainView := tview.NewTextView() + mainView. + SetChangedFunc(func() { + app.Draw() + }). + SetDynamicColors(true). + SetBorder(true). + SetTitle("Build") + + logsView := tview.NewTextView() + logsView. + SetChangedFunc(func() { + app.Draw() + }). + SetDynamicColors(true). + SetBorder(true). + SetTitle("Logs") + + grid := tview.NewGrid() + grid. + SetRows(0, 0). + SetColumns(0). + SetBorders(false). + AddItem(mainView, 0, 0, 1, 1, 0, 0, false). + AddItem(logsView, 1, 0, 1, 1, 0, 0, false) + + app. + SetRoot(grid, true). 
+ SetFocus(grid) + + output := &config.Output{ + Main: color.ColoredWriter{Writer: ansiWriter(mainView)}, + Logs: color.ColoredWriter{Writer: ansiWriter(logsView)}, + } + + return app, output +} + +func ansiWriter(writer io.Writer) io.Writer { + return &ansi{ + Writer: writer, + replacer: strings.NewReplacer( + "\033[31m", "[maroon]", + "\033[32m", "[green]", + "\033[33m", "[olive]", + "\033[34m", "[navy]", + "\033[35m", "[purple]", + "\033[36m", "[teal]", + "\033[37m", "[silver]", + + "\033[91m", "[red]", + "\033[92m", "[lime]", + "\033[93m", "[yellow]", + "\033[94m", "[blue]", + "\033[95m", "[fuchsia]", + "\033[96m", "[aqua]", + "\033[97m", "[white]", + + "\033[0m", "", + ), + } +} + +type ansi struct { + io.Writer + replacer *strings.Replacer +} + +func (a *ansi) Write(text []byte) (int, error) { + return a.replacer.WriteString(a.Writer, string(text)) +} diff --git a/cmd/skaffold/app/cmd/diagnose.go b/cmd/skaffold/app/cmd/diagnose.go index 4a6bf882677..d40e4782dfe 100644 --- a/cmd/skaffold/app/cmd/diagnose.go +++ b/cmd/skaffold/app/cmd/diagnose.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,9 +23,9 @@ import ( "io/ioutil" "time" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/runner" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/version" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/watch" @@ -49,7 +49,7 @@ func NewCmdDiagnose(out io.Writer) *cobra.Command { } func doDiagnose(out io.Writer) error { - _, config, err := newRunner(opts) + runner, config, err := newRunner(opts) if err != nil { return errors.Wrap(err, "creating runner") } @@ -58,14 +58,14 @@ func doDiagnose(out io.Writer) error { fmt.Fprintln(out, "Configuration version:", config.APIVersion) fmt.Fprintln(out, "Number of artifacts:", len(config.Build.Artifacts)) - if err := diagnoseArtifacts(out, config.Build.Artifacts); err != nil { + if err := diagnoseArtifacts(out, runner.Builder, config.Build.Artifacts); err != nil { return errors.Wrap(err, "running diagnostic on artifacts") } return nil } -func diagnoseArtifacts(out io.Writer, artifacts []*latest.Artifact) error { +func diagnoseArtifacts(out io.Writer, builder build.Builder, artifacts []*latest.Artifact) error { ctx := context.Background() for _, artifact := range artifacts { @@ -80,11 +80,11 @@ func diagnoseArtifacts(out io.Writer, artifacts []*latest.Artifact) error { fmt.Fprintf(out, " - Size of the context: %vbytes\n", size) } - timeDeps1, deps, err := timeToListDependencies(ctx, artifact) + timeDeps1, deps, err := timeToListDependencies(ctx, builder, artifact) if err != nil { return errors.Wrap(err, "listing artifact dependencies") } - timeDeps2, _, err := timeToListDependencies(ctx, artifact) + timeDeps2, _, err := timeToListDependencies(ctx, builder, artifact) if err != nil { return errors.Wrap(err, "listing artifact dependencies") } @@ 
-107,15 +107,10 @@ func diagnoseArtifacts(out io.Writer, artifacts []*latest.Artifact) error { return nil } -func timeToListDependencies(ctx context.Context, a *latest.Artifact) (time.Duration, []string, error) { +func timeToListDependencies(ctx context.Context, builder build.Builder, a *latest.Artifact) (time.Duration, []string, error) { start := time.Now() - - deps, err := runner.DependenciesForArtifact(ctx, a) - if err != nil { - return 0, nil, errors.Wrap(err, "listing artifact dependencies") - } - - return time.Since(start), deps, nil + paths, err := builder.DependenciesForArtifact(ctx, a) + return time.Since(start), paths, err } func timeToComputeMTimes(deps []string) (time.Duration, error) { diff --git a/cmd/skaffold/app/cmd/fix.go b/cmd/skaffold/app/cmd/fix.go index 25b1a4224b8..b33c575f233 100644 --- a/cmd/skaffold/app/cmd/fix.go +++ b/cmd/skaffold/app/cmd/fix.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,7 +31,7 @@ import ( func NewCmdFix(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "fix", - Short: "Converts old skaffold.yaml to newest schema version", + Short: "Converts old Skaffold config to newest schema version", RunE: func(cmd *cobra.Command, args []string) error { return runFix(out, opts.ConfigurationFile, overwrite) }, diff --git a/cmd/skaffold/app/cmd/fix_test.go b/cmd/skaffold/app/cmd/fix_test.go index 26d00d645de..941612f2baa 100644 --- a/cmd/skaffold/app/cmd/fix_test.go +++ b/cmd/skaffold/app/cmd/fix_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/cmd/skaffold/app/cmd/init.go b/cmd/skaffold/app/cmd/init.go index 264a9fcb115..a666567269e 100644 --- a/cmd/skaffold/app/cmd/init.go +++ b/cmd/skaffold/app/cmd/init.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -54,19 +54,19 @@ var ( force bool ) -// NewCmdInit describes the CLI command to generate a skaffold configuration. +// NewCmdInit describes the CLI command to generate a Skaffold configuration. func NewCmdInit(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "init", - Short: "Automatically generate skaffold configuration for deploying an application", + Short: "Automatically generate Skaffold configuration for deploying an application", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { return doInit(out) }, } cmd.Flags().StringVarP(&opts.ConfigurationFile, "filename", "f", "skaffold.yaml", "Filename or URL to the pipeline file") - cmd.Flags().BoolVar(&skipBuild, "skip-build", false, "Skip generating build artifacts in skaffold config") - cmd.Flags().BoolVar(&force, "force", false, "Force the generation of the skaffold config") + cmd.Flags().BoolVar(&skipBuild, "skip-build", false, "Skip generating build artifacts in Skaffold config") + cmd.Flags().BoolVar(&force, "force", false, "Force the generation of the Skaffold config") cmd.Flags().StringVar(&composeFile, "compose-file", "", "Initialize from a docker-compose file") cmd.Flags().StringArrayVarP(&cliArtifacts, "artifact", "a", nil, "'='-delimited dockerfile/image pair to generate build artifact\n(example: --artifact=/web/Dockerfile.web=gcr.io/web-project/image)") return cmd diff --git a/cmd/skaffold/app/cmd/init_test.go b/cmd/skaffold/app/cmd/init_test.go index 43e6c7b116f..db2c17c794d 100644 --- a/cmd/skaffold/app/cmd/init_test.go +++ b/cmd/skaffold/app/cmd/init_test.go @@ -1,5 
+1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/cmd/run.go b/cmd/skaffold/app/cmd/run.go index edb3c035f8f..324ee73ad18 100644 --- a/cmd/skaffold/app/cmd/run.go +++ b/cmd/skaffold/app/cmd/run.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -32,6 +32,7 @@ func NewCmdRun(out io.Writer) *cobra.Command { Short: "Runs a pipeline file", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { + opts.Command = "run" err := run(out) if err == nil { tips.PrintForRun(out, opts) diff --git a/cmd/skaffold/app/cmd/runner.go b/cmd/skaffold/app/cmd/runner.go index 8a6edb8ddb3..0c5cff8d7e6 100644 --- a/cmd/skaffold/app/cmd/runner.go +++ b/cmd/skaffold/app/cmd/runner.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,31 +19,39 @@ package cmd import ( configutil "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/runner" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/defaults" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/update" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // newRunner creates a SkaffoldRunner and returns the SkaffoldPipeline associated with it. func newRunner(opts *config.SkaffoldOptions) (*runner.SkaffoldRunner, *latest.SkaffoldPipeline, error) { parsed, err := schema.ParseConfig(opts.ConfigurationFile, true) if err != nil { + latest, current, versionErr := update.GetLatestAndCurrentVersion() + if versionErr == nil && latest.GT(current) { + logrus.Warnf("Your Skaffold version might be too old. 
Download the latest version (%s) at %s\n", latest, constants.LatestDownloadURL) + } return nil, nil, errors.Wrap(err, "parsing skaffold config") } config := parsed.(*latest.SkaffoldPipeline) - if err := defaults.Set(config); err != nil { - return nil, nil, errors.Wrap(err, "setting default values") - } - err = schema.ApplyProfiles(config, opts.Profiles) + err = schema.ApplyProfiles(config, opts) if err != nil { return nil, nil, errors.Wrap(err, "applying profiles") } + if err := defaults.Set(config); err != nil { + return nil, nil, errors.Wrap(err, "setting default values") + } + defaultRepo, err := configutil.GetDefaultRepo(opts.DefaultRepo) if err != nil { return nil, nil, errors.Wrap(err, "getting default repo") diff --git a/cmd/skaffold/app/cmd/signals.go b/cmd/skaffold/app/cmd/signals.go index 5491201f2fe..3db5fec4a84 100644 --- a/cmd/skaffold/app/cmd/signals.go +++ b/cmd/skaffold/app/cmd/signals.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/cmd/signals_test.go b/cmd/skaffold/app/cmd/signals_test.go index 6a3ec465944..9b56e908769 100644 --- a/cmd/skaffold/app/cmd/signals_test.go +++ b/cmd/skaffold/app/cmd/signals_test.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/cmd/skaffold/app/cmd/version.go b/cmd/skaffold/app/cmd/version.go index c8851ee02a9..64cf04ad524 100644 --- a/cmd/skaffold/app/cmd/version.go +++ b/cmd/skaffold/app/cmd/version.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/flags/template.go b/cmd/skaffold/app/flags/template.go index 028a3774542..8555de96692 100644 --- a/cmd/skaffold/app/flags/template.go +++ b/cmd/skaffold/app/flags/template.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/flags/template_test.go b/cmd/skaffold/app/flags/template_test.go index e55e1cf099c..32af4050c12 100644 --- a/cmd/skaffold/app/flags/template_test.go +++ b/cmd/skaffold/app/flags/template_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/app/skaffold.go b/cmd/skaffold/app/skaffold.go index 78cb9a9e23b..f7c0ae4c083 100644 --- a/cmd/skaffold/app/skaffold.go +++ b/cmd/skaffold/app/skaffold.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,9 +20,14 @@ import ( "os" "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/plugin" ) func Run() error { + if plugin.ShouldExecuteCorePlugin() { + return plugin.Execute() + } + c := cmd.NewSkaffoldCommand(os.Stdout, os.Stderr) return c.Execute() } diff --git a/cmd/skaffold/app/tips/tips.go b/cmd/skaffold/app/tips/tips.go index df534505aea..fdc2998fd80 100644 --- a/cmd/skaffold/app/tips/tips.go +++ b/cmd/skaffold/app/tips/tips.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/man/man.go b/cmd/skaffold/man/man.go index f2a7321df15..d5b66c036d0 100644 --- a/cmd/skaffold/man/man.go +++ b/cmd/skaffold/man/man.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/skaffold/skaffold.go b/cmd/skaffold/skaffold.go index 27962edab65..d489eb20a95 100644 --- a/cmd/skaffold/skaffold.go +++ b/cmd/skaffold/skaffold.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/deploy/docs/build.sh b/deploy/docs/build.sh index 044ba796f21..3ff86d4db0b 100755 --- a/deploy/docs/build.sh +++ b/deploy/docs/build.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ ## This script builds the Skaffold site assuming it's ran from a -## cloned skaffold repo with no submodules initialized. The script initializes the git submodules for +## cloned Skaffold repo with no submodules initialized. The script initializes the git submodules for ## the site's theme in a standard manner, thus this script can be used locally as well as for the PR review flow. set -x diff --git a/deploy/docs/clean.sh b/deploy/docs/clean.sh index f602279fa6d..3a92cf7dc7a 100755 --- a/deploy/docs/clean.sh +++ b/deploy/docs/clean.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/deploy/docs/cloudbuild-release.yaml b/deploy/docs/cloudbuild-release.yaml index 5b5b53c9375..6c1f68874ae 100644 --- a/deploy/docs/cloudbuild-release.yaml +++ b/deploy/docs/cloudbuild-release.yaml @@ -22,6 +22,8 @@ steps: - '.' - name: gcr.io/$PROJECT_ID/docs-controller:latest + env: + - 'HUGO_ENV=production' args: - 'bash' - '-xc' diff --git a/deploy/docs/cloudbuild.yaml b/deploy/docs/cloudbuild.yaml index 36361b05205..e794abe6442 100644 --- a/deploy/docs/cloudbuild.yaml +++ b/deploy/docs/cloudbuild.yaml @@ -22,6 +22,8 @@ steps: - '.' - name: gcr.io/$PROJECT_ID/docs-controller:latest + env: + - 'HUGO_ENV=production' args: - 'bash' - '-xc' diff --git a/deploy/docs/preview.sh b/deploy/docs/preview.sh index ffe5e7932cc..e3e6cd8f7d9 100755 --- a/deploy/docs/preview.sh +++ b/deploy/docs/preview.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ # limitations under the License. 
## This script starts a preview of the Skaffold site assuming it's ran from a -## cloned skaffold repo with no submodules initialized. The script initializes the git submodules for +## cloned Skaffold repo with no submodules initialized. The script initializes the git submodules for ## the site's theme in a standard manner, thus this script can be used locally as well as for the PR review flow. set -x diff --git a/deploy/skaffold/Dockerfile b/deploy/skaffold/Dockerfile index f0b1a2efd70..c526026ff02 100644 --- a/deploy/skaffold/Dockerfile +++ b/deploy/skaffold/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2018 The Skaffold Authors All rights reserved. +# Copyright 2019 The Skaffold Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/deploy/webhook/Dockerfile b/deploy/webhook/Dockerfile index d4bd261e454..322412642d9 100644 --- a/deploy/webhook/Dockerfile +++ b/deploy/webhook/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2018 The Skaffold Authors All rights reserved. +# Copyright 2019 The Skaffold Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ ENV KUBECTL_VERSION v1.12.0 RUN curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \ chmod +x /usr/local/bin/kubectl -ENV HUGO_VERSION=0.51 +ENV HUGO_VERSION=0.54.0 RUN curl -LO https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz && \ tar -xzf hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz && \ mv hugo /usr/local/bin/hugo diff --git a/deprecation-policy.md b/deprecation-policy.md index 299a55cc1e9..3490109f5a6 100644 --- a/deprecation-policy.md +++ b/deprecation-policy.md @@ -10,7 +10,7 @@ Skaffold adopts the [Kubernetes deprecation policy for admin facing components]( **Breaking changes** A breaking change is when the primary functionality of a feature changes in a way that the user has to make changes to their workflows/configuration. -- **Breaking config change**: In case of skaffold's pipeline config (skaffold.yaml) a breaking change between an old and new version occurs when the skaffold binary cannot parse the input yaml with auto-upgrade. This can happen when the new version removes a feature or when the new version introduces a mandatory field with no default value +- **Breaking config change**: In case of Skaffold's pipeline config (skaffold.yaml) a breaking change between an old and new version occurs when the skaffold binary cannot parse the input yaml with auto-upgrade. This can happen when the new version removes a feature or when the new version introduces a mandatory field with no default value - **Breaking functional change**: functional changes that force user workflow changes even when the config is the same or upgradeable. ## How do we deprecate things? @@ -21,7 +21,7 @@ A "deprecation event" would coincide with a release. a.) docs b.) release notes c.) command help (if applicable) -d.) annotated-skaffold.yaml (if applicable) +d.) 
https://skaffold.dev/docs/references/yaml/ (if applicable) 2. if applicable, [from the kubernetes policy](https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli): > Rule #6: Deprecated CLI elements must emit warnings (optionally disable) when used. @@ -44,9 +44,10 @@ However the **behavior** of individual component might suffer breaking changes d - Filewatcher: beta - Builders - - local (beta) - - googleCloudBuild (beta) - - kaniko (beta) + - local: beta + - googleCloudBuild: beta + - kaniko: beta + - plugins gcb: alpha - Artifact types: - Dockerfile: beta - Bazel: beta @@ -56,7 +57,7 @@ However the **behavior** of individual component might suffer breaking changes d - Port-forwarding: alpha - Taggers: beta - gitCommit : beta - - Sha256: beta + - sha256: beta - dateTime : beta - envTagger: beta - Testers: alpha diff --git a/docs/.DS_Store b/docs/.DS_Store index 317a8975ace..e15eaf1126a 100644 Binary files a/docs/.DS_Store and b/docs/.DS_Store differ diff --git a/docs/config.toml b/docs/config.toml index d559c3ecb06..3147ccc7528 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -70,7 +70,7 @@ weight = 1 #copyright = "Skaffold" #privacy_policy = "https://policies.google.com/privacy" github_repo = "https://github.com/GoogleContainerTools/skaffold" -skaffold_version = "skaffold/v1beta2" +skaffold_version = "skaffold/v1beta5" # Google Custom Search Engine ID. Remove or comment out to disable search. gcs_engine_id = "013756393218025596041:3nojel67sum" diff --git a/docs/content/en/docs/_index.md b/docs/content/en/docs/_index.md index 463b3d529f4..8ce5bd8d1bd 100755 --- a/docs/content/en/docs/_index.md +++ b/docs/content/en/docs/_index.md @@ -1,4 +1,3 @@ - --- title: "Skaffold Documentation" linkTitle: "Documentation" @@ -17,13 +16,13 @@ provides building blocks and describe customizations for a CI/CD pipeline. 
## Features * Fast local Kubernetes Development - * **optimized source-to-k8s** - Skaffold detects changes in your source code and handles the pipeline to + * **optimized "Source to Kubernetes"** - Skaffold detects changes in your source code and handles the pipeline to **build**, **push**, and **deploy** your application automatically with **policy based image tagging** and **highly optimized, fast local workflows** * **continuous feedback** - Skaffold automatically manages logging and port-forwarding * Skaffold projects work everywhere * **share with other developers** - Skaffold is the easiest way to **share your project** with the world: `git clone` and `skaffold run` * **context aware** - use Skaffold profiles, user level config, environment variables and flags to describe differences in environments - * **CI/CD building blocks** - use `skaffold run` end-to-end or just part of skaffold stages from build to deployment in your CI/CD system + * **CI/CD building blocks** - use `skaffold run` end-to-end or just part of Skaffold stages from build to deployment in your CI/CD system * skaffold.yaml - a single pluggable, declarative configuration for your project * **skaffold init** - Skaffold discovers your files and generates its own config file * **multi-component apps** - Skaffold supports applications consisting of multiple components @@ -57,7 +56,7 @@ Skaffold also supports skipping stages if you want to. {{< /alert >}} What's more, the pluggable architecture is central to Skaffold's design, allowing you to use -the tool you prefer in each stage. Also, skaffold's `profiles` feature grants +the tool you prefer in each stage. Also, Skaffold's `profiles` feature grants you the freedom to switch tools as you see fit depending on the context. 
For example, if you are coding on a local machine, you can configure Skaffold to build artifacts @@ -75,6 +74,7 @@ Skaffold supports the following tools: * Dockerfile on cloud (Google Cloud Build) * Bazel locally * Jib Maven/Gradle locally +* Jib Maven/Gradle on cloud (Google Cloud Build) {{% /tab %}} {{% tab "TESTERS" %}} @@ -91,7 +91,7 @@ Skaffold supports the following tools: * tag by git commit * tag by current date&time * tag by environment variables based template -* tag by checksum of the source code +* tag by digest of the Docker image {{% /tab %}} {{% tab "PUSH STRATEGIES" %}} @@ -104,7 +104,7 @@ Skaffold supports the following tools: ![architecture](/images/architecture.png) -Besides the above steps, skaffold also automatically manages the following utilities for you: +Besides the above steps, Skaffold also automatically manages the following utilities for you: * forward container ports to your local machine using `kubectl port-forward` * aggregate all the logs from the deployed pods diff --git a/docs/content/en/docs/concepts/_index.md b/docs/content/en/docs/concepts/_index.md index f64bb15c75e..af8af802bf3 100755 --- a/docs/content/en/docs/concepts/_index.md +++ b/docs/content/en/docs/concepts/_index.md @@ -1,4 +1,3 @@ - --- title: "Concepts" linkTitle: "Concepts" @@ -8,12 +7,11 @@ weight: 80 This document discusses some concepts that can help you develop a deep understanding of Skaffold. - ## Configuration of the Skaffold pipeline (skaffold.yaml) You can configure Skaffold with the Skaffold configuration file, `skaffold.yaml`. The configuration file should be placed in the root of your -project directory; when you run the `Skaffold` command, Skaffold will try to +project directory; when you run the `skaffold` command, Skaffold will try to read the configuration file from the current directory. `skaffold.yaml` consists of five different components: @@ -22,13 +20,29 @@ read the configuration file from the current directory. 
| ---------- | ------------| | `apiVersion` | The Skaffold API version you would like to use. The current API version is {{< skaffold-version >}}. | | `kind` | The Skaffold configuration file has the kind `Config`. | -| `build` | Specifies how Skaffold should build artifacts. You have control over what tool Skaffold can use, how Skaffold tags artifacts and how Skaffold pushes artifacts. Skaffold supports using local Docker daemon, Google Cloud Build, Kaniko, or Bazel to build artifacts. See [Using Builders](/docs/how-tos/builders) and [Using Taggers](/docs/how-tos/taggers) for more information. | -| `test` | Specifies how Skaffold should test artifacts. Skaffold supports [container-structure-tests](https://github.com/GoogleContainerTools/container-structure-test) to test built artifacts.See [Using testers](/docs/how-tos/testers) for more information. | -| `deploy` | Specifies how Skaffold should deploy artifacts. Skaffold supports using `kubectl`, Helm, or kustomize to deploy artifacts.See [Using Deployers](/docs/how-tos/deployers) for more information. | -| `profiles`| Profile is a set of settings that, when activated, overrides the current configuration. You can use Profile to override the `build` and the`deploy`> section. | +| `build` | Specifies how Skaffold builds artifacts. You have control over what tool Skaffold can use, how Skaffold tags artifacts and how Skaffold pushes artifacts. Skaffold supports using local Docker daemon, Google Cloud Build, Kaniko, or Bazel to build artifacts. See [Builders](/docs/how-tos/builders) and [Taggers](/docs/how-tos/taggers) for more information. | +| `test` | Specifies how Skaffold tests artifacts. Skaffold supports [container-structure-tests](https://github.com/GoogleContainerTools/container-structure-test) to test built artifacts. See [Testers](/docs/how-tos/testers) for more information. | +| `deploy` | Specifies how Skaffold deploys artifacts. 
Skaffold supports using `kubectl`, `helm`, or `kustomize` to deploy artifacts. See [Deployers](/docs/how-tos/deployers) for more information. | +| `profiles`| Profile is a set of settings that, when activated, overrides the current configuration. You can use Profile to override the `build`, `test` and `deploy` sections. | + +You can [learn more](/docs/references/yaml) about the syntax of `skaffold.yaml`. + +## Global configuration (~/.skaffold/config) + +Some context specific settings can be configured in a global configuration file, defaulting to `~/.skaffold/config`. Options can be configured globally or for specific contexts. + +The options are: + +| Option | Type | Description | +| ------ | ---- | ----------- | +| `default-repo` | string | The image registry where images are published (See below). | +| `local-cluster` | boolean | If true, do not try to push images after building. By default, contexts with names `docker-for-desktop`, `docker-desktop`, or `minikube` are treated as local. | -You can learn more about the syntax of `skaffold.yaml` at -[`skaffold.yaml References`](/docs/references/config). +For example, to treat any context as local by default: + +```bash +skaffold config set --global local-cluster true +``` ## Workflow @@ -49,29 +63,35 @@ will not push artifacts to a remote repository. ## Image repository handling Skaffold allows for automatically rewriting image names to your repository. -This way you can grab a skaffold project and just `skaffold run` it to deploy to your cluster. +This way you can grab a Skaffold project and just `skaffold run` it to deploy to your cluster. The way to achieve this is the `default-repo` functionality: 1. Via `default-repo` flag - - skaffold dev --default-repo - + + ```bash + skaffold dev --default-repo + ``` + 1. Via `SKAFFOLD_DEFAULT_REPO` environment variable - SKAFFOLD_DEFAULT_REPO= skaffold dev + ```bash + SKAFFOLD_DEFAULT_REPO= skaffold dev + ``` + +1. Via Skaffold's global config -1. 
Via skaffold's global config - - skaffold config set default-repo + ```bash + skaffold config set default-repo + ``` -If skaffold doesn't find `default-repo`, there is no automated image name rewriting. +If Skaffold doesn't find `default-repo`, there is no automated image name rewriting. The image name rewriting strategies are designed to be *conflict-free*: the full image name is rewritten on top of the default-repo so similar image names don't collide in the base namespace (e.g.: repo1/example and repo2/example would collide in the target_namespace/example without this) Automated image name rewriting strategies are determined based on the default-repo and the original image repository: -* default-repo does not contain gcr.io +* default-repo does not begin with gcr.io * **strategy**: escape & concat & truncate to 256 ``` @@ -79,15 +99,16 @@ Automated image name rewriting strategies are determined based on the default-re default-repo: aws_account_id.dkr.ecr.region.amazonaws.com rewritten image: aws_account_id.dkr.ecr.region.amazonaws.com/gcr_io_k8s-skaffold_skaffold-example1 ``` -* default-repo contains "gcr.io" (special case - as GCR allows for infinite deep image repo names) + +* default-repo begins with "gcr.io" (special case - as GCR allows for infinite deep image repo names) * **strategy**: concat unless prefix matches * **example1**: prefix doesn't match: - ```` + ``` original image: gcr.io/k8s-skaffold/skaffold-example1 default-repo: gcr.io/myproject/myimage - rewritten image: gcr.io/myproject/gcr.io/k8s-skaffold/skaffold-example1 - ```` + rewritten image: gcr.io/myproject/myimage/gcr.io/k8s-skaffold/skaffold-example1 + ``` * **example2**: prefix matches: ``` @@ -95,31 +116,38 @@ Automated image name rewriting strategies are determined based on the default-re default-repo: gcr.io/k8s-skaffold rewritten image: gcr.io/k8s-skaffold/skaffold-example1 ``` + * **example3**: shared prefix: + + ``` + original image: gcr.io/k8s-skaffold/skaffold-example1 + 
default-repo: gcr.io/k8s-skaffold/myimage + rewritten image: gcr.io/k8s-skaffold/myimage/skaffold-example1 + ``` ## Architecture -Skaffold has is designed with pluggability in mind: +Skaffold is designed with pluggability in mind: ![architecture](/images/architecture.png) The architecture allows you to use Skaffold with the tool you prefer. Skaffold provides built-in support for the following tools: -* Build +* **Build** * Dockerfile locally, in-cluster with kaniko or using Google Cloud Build * Bazel locally - * Jib Maven and Jib Gradle locally -* Test + * Jib Maven and Jib Gradle locally or using Google Cloud Build +* **Test** * [container-structure-test](https://github.com/GoogleContainerTools/container-structure-test) -* Deploy - * Kubernetes Command-Line Interface (`kubectl`) - * Helm - * kustomize -* Taggers +* **Tag** * Git tagger * Sha256 tagger * Env Template tagger * DateTime tagger +* **Deploy** + * Kubernetes Command-Line Interface (`kubectl`) + * [Helm](https://helm.sh/) + * [kustomize](https://github.com/kubernetes-sigs/kustomize) And you can combine the tools as you see fit in Skaffold. For experimental projects, you may want to use local Docker daemon for building artifacts, and @@ -135,7 +163,7 @@ Cloud Build and deploy using Helm: Skaffold also supports development profiles. You can specify multiple different profiles in the configuration and use whichever best serves your need in the moment without having to modify the configuration file. You can learn more about -profiles from [Using Profiles](/docs/how-tos/profiles). +profiles from [Profiles](/docs/how-tos/profiles). ## Operating modes @@ -149,5 +177,5 @@ Skaffold provides two separate operating modes: you will have to call `skaffold run` again to build and deploy your application. -Skaffold command-line interfact also provides other functionalities that may +Skaffold command-line interface also provides other functionalities that may be helpful to your project. 
For more information, see [CLI References](/docs/references/cli). diff --git a/docs/content/en/docs/getting-started/_index.md b/docs/content/en/docs/getting-started/_index.md index 1104916ffd3..01486af32f2 100644 --- a/docs/content/en/docs/getting-started/_index.md +++ b/docs/content/en/docs/getting-started/_index.md @@ -1,4 +1,3 @@ - --- title: "Getting Started" linkTitle: "Getting Started" @@ -7,31 +6,31 @@ weight: 10 This document showcases how to get started with Skaffold using [Docker](https://www.docker.com/) and Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). -Aside from Docker and kubectl, Skaffold also supports a variety of other tools +Aside from `Docker` and `kubectl`, Skaffold also supports a variety of other tools and workflows; see [How-to Guides](/docs/how-tos) and [Tutorials](/docs/tutorials) for more information. In this quickstart, you will: -* Install Skaffold -* Download a sample go app -* Use `skaffold dev` to build and deploy your app every time your code changes -* Use `skaffold run` to build and deploy your app once, on demand +* Install Skaffold, +* Download a sample go app, +* Use `skaffold dev` to build and deploy your app every time your code changes, +* Use `skaffold run` to build and deploy your app once, on demand. ## Before you begin * [Install Docker](https://www.docker.com/get-started) * [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -* Configure kubectl to connect to a Kubernetes cluster. You can use +* Configure `kubectl` to connect to a Kubernetes cluster. You can use * any Kubernetes platform with Skaffold; see [Picking the Right Solution](https://kubernetes.io/docs/setup/pick-right-solution/) from Kubernetes documentation for instructions on choosing the right platfrom. * [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) - is a hosted Kubernetes solution. 
To set up kubectl with Google Kubernetes Engine, + is a hosted Kubernetes solution. To set up `kubectl` with Google Kubernetes Engine, see [Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart). * [Minikube](https://kubernetes.io/docs/setup/minikube/) is a local Kubernetes solution best for development and testing. To set up - kubectl with Minikube, see [Installing Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/).

+ `kubectl` with Minikube, see [Installing Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/).

{{< alert title="Note" >}} If you use a non-local solution, your Docker client needs to be configured @@ -70,7 +69,6 @@ chmod +x skaffold sudo mv skaffold /usr/local/bin ``` - {{% /tab %}} {{% tab "MACOS" %}} @@ -109,7 +107,6 @@ sudo mv skaffold /usr/local/bin ``` {{% /tab %}} - {{% tab "WINDOWS" %}} ### Chocolatey @@ -128,7 +125,6 @@ For the latest **bleeding edge** build, download and place it in your `PATH`: https://storage.googleapis.com/skaffold/builds/latest/skaffold-windows-amd64.exe - {{% /tab %}} {{% /tabs %}} @@ -136,21 +132,23 @@ https://storage.googleapis.com/skaffold/builds/latest/skaffold-windows-amd64.exe 1. Clone the Skaffold repository: - ``` + ```bash git clone https://github.com/GoogleContainerTools/skaffold ``` + 1. Change to the `examples/getting-started` directory. - ``` + ```bash cd examples/getting-started ``` - + ## `skaffold dev`: Build and deploy your app every time your code changes -Run command `skaffold dev` to build and deploy your app continuously. You should -see some outputs similar to the following entries: +Run `skaffold dev --default-repo ` to build and deploy your app continuously. +The `--default-repo` functionality enables pushing images to your own repository instead of the default `gcr.io/k8s-skaffold` repo. +You should see some outputs similar to the following entries: -```bash +``` Starting build... Found [minikube] context, using local docker daemon. Sending build context to Docker daemon 6.144kB @@ -170,7 +168,7 @@ Step 5/5 : RUN go build -o app main.go ---> 9c4622e8f0e7 Successfully built 9c4622e8f0e7 Successfully tagged 930080f0965230e824a79b9e7eccffbd:latest -Successfully tagged gcr.io/k8s-skaffold/skaffold-example:9c4622e8f0e7b5549a61a503bf73366a9cf7f7512aa8e9d64f3327a3c7fded1b +Successfully tagged /gcr.io/k8s-skaffold/skaffold-example:9c4622e8f0e7b5549a61a503bf73366a9cf7f7512aa8e9d64f3327a3c7fded1b Build complete in 657.426821ms Starting deploy... Deploying k8s-pod.yaml... 
@@ -190,6 +188,10 @@ workflow, which, in this example, is * Deploying the Kubernetes manifest using `kubectl apply -f` * Streaming the logs back from the deployed app +{{< alert title="Note" >}} +For skaffold dev, if `imagePullPolicy` is set to `Always` in your Kubernetes manifest, it will expect the image to exist in a remote registry. +{{< /alert >}} + Let's re-trigger the workflow just by a single code change! Update `main.go` as follows: @@ -213,7 +215,7 @@ The moment you save the file, Skaffold will repeat the workflow described in `skaffold.yaml` and eventually re-deploy your application. Once the pipeline is completed, you should see updated outputs in the terminal: -```bash +``` [getting-started] Hello Skaffold! ``` @@ -227,9 +229,9 @@ Skaffold will perform the workflow described in `skaffold.yaml` exactly once. ## What's next For more in-depth topics of Skaffold, explore [Skaffold Concepts: Configuration](/docs/concepts/#configuration), -[Skaffold Concepts: Workflow](/docs/concepts/workflow), and [Skaffold Concepts: Architecture](/docs/config/architecture). +[Skaffold Concepts: Workflow](/docs/concepts/#workflow), and [Skaffold Concepts: Architecture](/docs/concepts/#architecture). To learn more about how Skaffold builds, tags, and deploys your app, see the How-to Guides on -[Using Builders](/docs/how-tos/builders), [Using Taggers](/docs/how-tos/taggers), and [Using Deployers](/docs/how-tos/deployers). +using [Builders](/docs/how-tos/builders), [Taggers](/docs/how-tos/taggers), and [Deployers](/docs/how-tos/deployers). [Skaffold Tutorials](/docs/tutorials) details some of the common use cases of Skaffold. 
diff --git a/docs/content/en/docs/how-tos/_index.md b/docs/content/en/docs/how-tos/_index.md index 71f336a0376..ee6d75c8841 100755 --- a/docs/content/en/docs/how-tos/_index.md +++ b/docs/content/en/docs/how-tos/_index.md @@ -1,13 +1,16 @@ - --- title: "How-to Guides" linkTitle: "How-to Guides" weight: 30 --- -| Skaffold Workflow | -|----------| -| [Using Builders](/docs/how-tos/builders) | -| [Using Deployers](/docs/how-tos/deployers) | -| [Using Taggers](/docs/how-tos/taggers) | -| [Using Profiles](/docs/how-tos/profiles) | +| Skaffold Workflow | | +|----------|---| +| [Builders](/docs/how-tos/builders) | How Docker images are built | +| [Testers](/docs/how-tos/testers) | How images are tested | +| [Taggers](/docs/how-tos/taggers) | How images are tagged | +| [Deployers](/docs/how-tos/deployers) | How your app is deployed to a Kubernetes cluster | +| [File sync](/docs/how-tos/filesync) | File sync for files that don’t require full rebuild | +| [Port forwarding](/docs/how-tos/portforward) | Port forwarding from pods | +| [Profiles](/docs/how-tos/profiles) | Define configurations for different contexts | +| [Templated fields](/docs/how-tos/templating) | Adjust configuration with environment variables | diff --git a/docs/content/en/docs/how-tos/builders/_index.md b/docs/content/en/docs/how-tos/builders/_index.md index e63312e40a2..2e7ede2c558 100755 --- a/docs/content/en/docs/how-tos/builders/_index.md +++ b/docs/content/en/docs/how-tos/builders/_index.md @@ -1,7 +1,6 @@ - --- -title: "Using builders" -linkTitle: "Using builders" +title: "Builders" +linkTitle: "Builders" weight: 10 --- @@ -10,61 +9,54 @@ to build Docker images. 
Skaffold supports the following tools to build your image: -* Dockerfile locally with Docker -* Dockerfile remotely with Google Cloud Build -* Dockerfile in-cluster with Kaniko -* Bazel locally with bazel and Docker daemon -* JIB Maven and Gradle projects locally +* [Dockerfile](https://docs.docker.com/engine/reference/builder/) locally with Docker +* Dockerfile remotely with [Google Cloud Build](https://cloud.google.com/cloud-build/docs/) +* Dockerfile in-cluster with [Kaniko](https://github.com/GoogleContainerTools/kaniko) +* [Bazel](https://bazel.build/) locally +* [Jib](https://github.com/GoogleContainerTools/jib) Maven and Gradle projects locally +* [Jib](https://github.com/GoogleContainerTools/jib) remotely with [Google Cloud Build](https://cloud.google.com/cloud-build/docs/) - The `build` section in the Skaffold configuration file, `skaffold.yaml`, -controls how Skaffold builds artifacts. To use a specific tool for building -artifacts, add the value representing the tool and options for using the tool -to the `build` section. For a detailed discussion on Skaffold configuration, -see [Skaffold Concepts: Configuration](/docs/concepts/#configuration) and -[skaffold.yaml References](/docs/references/config). +controls how artifacts are built. To use a specific tool for building +artifacts, add the value representing the tool and options for using that tool +to the `build` section. + +For a detailed discussion on Skaffold configuration, see +[Skaffold Concepts](/docs/concepts/#configuration) and +[skaffold.yaml References](/docs/references/yaml). ## Dockerfile locally with Docker If you have [Docker Desktop](https://www.docker.com/products/docker-desktop) -installed on your machine, you can configure Skaffold to build artifacts with -the local Docker daemon. +installed, Skaffold can be configured to build artifacts with the local +Docker daemon. 
By default, Skaffold connects to the local Docker daemon using -[Docker Engine APIs](https://docs.docker.com/develop/sdk/). You can, however, -ask Skaffold to use the [command-line interface](https://docs.docker.com/engine/reference/commandline/cli/) +[Docker Engine APIs](https://docs.docker.com/develop/sdk/). Skaffold can, however, +be asked to use the [command-line interface](https://docs.docker.com/engine/reference/commandline/cli/) instead. Additionally, Skaffold offers the option to build artifacts with -[BuildKit](https://github.com/moby/buildkit). After the artifacts are -successfully built, Skaffold will try pushing the Docker -images to the remote registry. You can choose to skip this step. +[BuildKit](https://github.com/moby/buildkit). + +After the artifacts are successfully built, Docker images will be pushed +to the remote registry. You can choose to skip this step. + +### Configuration To use the local Docker daemon, add build type `local` to the `build` section -of `skaffold.yaml`. The `local` type offers the following options: +of `skaffold.yaml`. The following options can optionally be configured: + +{{< schema root="LocalBuild" >}} -|Option|Description| -|-----|-----| -|`push`| OPTIONAL. Should images be pushed to a registry. Default value is `false` for local clusters, `true` for remote clusters. | -|`useDockerCLI`| OPTIONAL. Uses Docker command-line interface instead of Docker Engine APIs. Default value is `false`. | -|`useBuildkit`| OPTIONAL Uses BuildKit to build Docker images. Default value is `false`. 
| +### Example -The following `build` section, for example, instructs Skaffold to build a +The following `build` section instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with the local Docker daemon: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - # Use local Docker daemon to build artifacts - local: - push: true - useDockerCLI: false - useBuildkit: false -# The build section above is equal to -# build: -# artifacts: -# - image: gcr.io/k8s-skaffold/example -# local: {} -``` +{{% readfile file="samples/builders/local.yaml" %}} + +Which is equivalent to: + +{{% readfile file="samples/builders/local-full.yaml" %}} ## Dockerfile remotely with Google Cloud Build @@ -73,34 +65,24 @@ build: your builds using Google infrastructure. To get started with Google Build, see [Cloud Build Quickstart](https://cloud.google.com/cloud-build/docs/quickstart-docker). -Skaffold can automatically connect to Google Cloud Build, and run your builds -with it. After Google Cloud Build finishes building your artifacts, they will +Skaffold can automatically connect to Cloud Build, and run your builds +with it. After Cloud Build finishes building your artifacts, they will be saved to the specified remote registry, such as [Google Container Registry](https://cloud.google.com/container-registry/). -To use Google Cloud Build, add build type `googleCloudBuild` to the `build` -section of `skaffold.yaml`. The `googleCloudBuild` type offers the following -options: +### Configuration + +To use Cloud Build, add build type `googleCloudBuild` to the `build` +section of `skaffold.yaml`. The following options can optionally be configured: -|Option|Description| -|-----|-----| -|`projectId`| REQUIRED The ID of your Google Cloud Platform Project. | -|`DiskSizeGb`| OPTIONAL The disk size of the VM that runs the build. 
See [Cloud Build API Reference: Build Options](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildoptions) for more information. | -|`machineType`| OPTIONAL The type of the VM that runs the build. See [Cloud Build API Reference: Build Options](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildoptions) for more information. | -|`timeOut`| OPTIONAL The amount of time (in seconds) that this build should be allowed to run. See [Cloud Build API Reference: Resource/Build](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#resource-build) for more information. | -|`dockerImage`| OPTIONAL The name of the image that will run the build. See [Cloud Build API Reference: BuildStep](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildstep) for more information. Default value is `gcr.io/cloud-builders/docker`. | +{{< schema root="GoogleCloudBuild" >}} -The following `build` section, for example, instructs Skaffold to build a +### Example + +The following `build` section instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with Google Cloud Build: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - # Use Google Cloud Build to build artifacts - googleCloudBuild: - projectId: YOUR-GCP-PROJECT -``` +{{% readfile file="samples/builders/gcb.yaml" %}} ## Dockerfile in-cluster with Kaniko @@ -111,58 +93,108 @@ that cannot easily or securely run a Docker daemon. Skaffold can help build artifacts in a Kubernetes cluster using the Kaniko image; after the artifacts are built, kaniko can push them to remote registries. + +### Configuration + To use Kaniko, add build type `kaniko` to the `build` section of -`skaffold.yaml`. The `kaniko` type offers the following options: +`skaffold.yaml`. 
The following options can optionally be configured: + +{{< schema root="KanikoBuild" >}} + +The `buildContext` can be either: -|Option|Description| -|-----|-----| -|`buildContext`| OPTIONAL The Kaniko build context. See [Kaniko Documentation: Using Kaniko](https://github.com/GoogleContainerTools/kaniko#using-kaniko) for more information. | -|`pullSecret`| OPTIONAL The path to the secret key file. See [Kaniko Documentation: Running Kaniko in a Kubernetes cluster](https://github.com/GoogleContainerTools/kaniko#running-kaniko-in-a-kubernetes-cluster) for more information. | -|`pullSecretName`| OPTIONAL The name of the Kubernetes secret for pulling the files from the build context and pushing the final image. See [Kaniko Documentation: Running Kaniko in a Kubernetes cluster](https://github.com/GoogleContainerTools/kaniko#running-kaniko-in-a-kubernetes-cluster) for more information. Default value is `kaniko-secret`. | -|`namespace`| OPTIONAL The Kubernetes namespace. Default value is the current namespace in Kubernetes configuration. | -|`timeout`| OPTIONAL The amount of time (in seconds) that this build should be allowed to run. Default value is 20 minutes (`20m`). 
| +{{< schema root="KanikoBuildContext" >}} -The following `build` section, for example, instructs Skaffold to build a +### Example + +The following `build` section, instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with Kaniko: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - # Use Kaniko to build artifacts - kaniko: - buildContext: - gcsBucket: YOUR-BUCKET -``` +{{% readfile file="samples/builders/kaniko.yaml" %}} ## Jib Maven and Gradle locally -{{% todo 1299 %}} +[Jib](https://github.com/GoogleContainerTools/jib#jib) is a set of plugins for +[Maven](https://github.com/GoogleContainerTools/jib/blob/master/jib-maven-plugin) and +[Gradle](https://github.com/GoogleContainerTools/jib/blob/master/jib-gradle-plugin) +for building optimized Docker and OCI images for Java applications +without a Docker daemon. + +Skaffold can help build artifacts using Jib; Jib builds the container images and then +pushes them to the local Docker daemon or to remote registries as instructed by Skaffold. + +### Configuration + +To use Jib, add a `jibMaven` or `jibGradle` field to each artifact you specify in the +`artifacts` part of the `build` section. `context` should be a path to +your Maven or Gradle project. + +{{< alert title="Note" >}} +Your project must be configured to use Jib already. +{{< /alert >}} + +The `jibMaven` type offers the following options: +{{< schema root="JibMavenArtifact" >}} -## Bazel locally with bazel and Docker daemon +The `jibGradle` type offers the following options: + +{{< schema root="JibGradleArtifact" >}} + +### Example + +See the [Skaffold-Jib demo project](https://github.com/GoogleContainerTools/skaffold/blob/master/examples/jib/) +for an example. + +### Multi-Module Projects + +Skaffold can be configured for _multi-module projects_ too. A multi-module project +has several _modules_ (Maven terminology) or _sub-projects_ (Gradle terminology) that +each produce a separate container image. 
+ +#### Maven + +To build a multi-module project with Maven, specify each module as a separate +Skaffold artifact. For each artifact, add a `jibMaven` field with a `module` field +specifying either the module's `:artifactId`, `groupId:artifactId`, or the relative path +to the module _within the project_. Each artifact's `context` field +should point to the root project location. + +Building multi-module projects with Skaffold-Jib has one additional requirement: +a Jib goal must be explicitly bound to the `package` phase for each specific +module that produces a container image. + +#### Gradle + +To build a multi-module project with Gradle, specify each sub-project as a separate +Skaffold artifact. For each artifact, add a `jibGradle` field with a `project` field +containing the sub-project's name (the directory, by default). Each artifact's `context` field +should point to the root project location. + +## Jib Maven and Gradle remotely with Google Cloud Build + +{{% todo 1299 %}} + +## Bazel locally [Bazel](https://bazel.build/) is a fast, scalable, multi-language, and extensible build system. Skaffold can help build artifacts using Bazel; after Bazel finishes building -container images, they will be loaded into the local Docker daemon. To use -Bazel, add `workspace` and `bazel` fields to each artifact you specify in the +container images, they will be loaded into the local Docker daemon. + +### Configuration + +To use Bazel, add a `bazel` field to each artifact you specify in the `artifacts` part of the `build` section, and use the build type `local`. `context` should be a path containing the bazel files -(`WORKSPACE` and `BUILD`); The `bazel` field should have a `target` -specification, which Skaffold will use to load the image to the Docker daemon. +(`WORKSPACE` and `BUILD`). 
The following options can optionally be configured: -The following `build` section, for example, instructs Skaffold to build a -Docker image `gcr.io/k8s-skaffold/example` with Bazel: +{{< schema root="BazelArtifact" >}} -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - context: . - bazel: - target: //:example.tar - local: {} -``` +### Example + +The following `build` section instructs Skaffold to build a +Docker image `gcr.io/k8s-skaffold/example` with Bazel: +{{% readfile file="samples/builders/bazel.yaml" %}} diff --git a/docs/content/en/docs/how-tos/deployers/_index.md b/docs/content/en/docs/how-tos/deployers/_index.md index fba154c10ad..212be8b3a82 100755 --- a/docs/content/en/docs/how-tos/deployers/_index.md +++ b/docs/content/en/docs/how-tos/deployers/_index.md @@ -1,69 +1,72 @@ - --- -title: "Using deployers" -linkTitle: "Using deployers" -weight: 20 +title: "Deployers" +linkTitle: "Deployers" +weight: 30 --- This page discusses how to set up Skaffold to use the tool of your choice to deploy your app to a Kubernetes cluster. -When skaffold deploys an application the following steps happen: +When Skaffold deploys an application the following steps happen: -* the skaffold deployer _renders_ the final kubernetes manifests: skaffold replaces the image names in the kubernetes manifests with the final tagged image names. +* the Skaffold deployer _renders_ the final kubernetes manifests: Skaffold replaces the image names in the kubernetes manifests with the final tagged image names. Also, in case of the more complicated deployers the rendering step involves expanding templates (in case of helm) or calculating overlays (in case of kustomize). 
-* the skaffold deployer _deploys_ the final kubernetes manifests to the cluster +* the Skaffold deployer _deploys_ the final kubernetes manifests to the cluster + +### Supported deployers Skaffold supports the following tools for deploying applications: * [`kubectl`](#deploying-with-kubectl) -* [Helm](#deploying-with-helm) +* [helm](#deploying-with-helm) * [kustomize](#deploying-with-kustomize) The `deploy` section in the Skaffold configuration file, `skaffold.yaml`, controls how Skaffold builds artifacts. To use a specific tool for deploying artifacts, add the value representing the tool and options for using the tool -to the `build` section. For a detailed discussion on Skaffold configuration, -see [Skaffold Concepts: Configuration](/docs/concepts/#configuration) and -[Skaffold.yaml References](/docs/references/config). +to the `deploy` section. + +For a detailed discussion on Skaffold configuration, see +[Skaffold Concepts](/docs/concepts/#configuration) and +[skaffold.yaml References](/docs/references/yaml). ## Deploying with kubectl -[`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) is -Kubernetes command-line tool for deploying and managing -applications on Kubernetes clusters. Skaffold can work with `kubectl` to +`kubectl` is Kubernetes +[command-line tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +for deploying and managing +applications on Kubernetes clusters. + +Skaffold can work with `kubectl` to deploy artifacts on any Kubernetes cluster, including [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine) clusters and local [Minikube](https://github.com/kubernetes/minikube) clusters. +### Configuration + To use `kubectl`, add deploy type `kubectl` to the `deploy` section of -`skaffold.yaml`. The `kubectl` type offers the following options: +`skaffold.yaml`. + +The `kubectl` type offers the following options: -|Option|Description| -|----|----| -|`manifests`| OPTIONAL. 
A list of paths to Kubernetes Manifests. Default value is `kubectl`.| -|`remoteManifests`|OPTIONAL. A list of paths to Kubernetes Manifests in remote clusters.| -|`flags`| OPTIONAL. Additional flags to pass to `kubectl`. You can specify three types of flags:
  • `global`: flags that apply to every command.
  • `apply`: flags that apply to creation commands.
  • `delete`: flags that apply to deletion commands.
    • | +{{< schema root="KubectlDeploy" >}} -The following `deploy` section, for example, instructs Skaffold to deploy +`flags` section offers the following options: + +{{< schema root="KubectlFlags" >}} + +### Example + +The following `deploy` section instructs Skaffold to deploy artifacts using `kubectl`: -```yaml -deploy: - kubectl: - manifests: - - k8s-* - # Uncomment the following lines to add remote manifests and flags - # remoteManifests: - # - YOUR-REMOTE-MANIFESTS - # flags: - # global: - # - YOUR-GLOBAL-FLAGS - # apply: - # - YOUR-APPLY-FLAGS - # delete: - # - YOUR-DELETE-FLAGS -``` +{{% readfile file="samples/deployers/kubectl.yaml" %}} + +{{< alert title="Note" >}} +kubectl CLI must be installed on your machine. Skaffold will not +install it. +Also, it has to be installed in a version that's compatible with your cluster. +{{< /alert >}} ## Deploying with Helm @@ -71,92 +74,58 @@ deploy: manage Kubernetes applications. Skaffold can work with Helm by calling its command-line interface. -To use Helm with Skaffold, add deploy type `helm` to the `deploy` section -of `skaffold.yaml`. The `helm` type offers the following options: - -|Option|Description| -|----|----| -|`releases`|Required A list of Helm releases. 
See the table below for the schema of `releases`.| - -Each release includes the following fields: - -|Option|Description| -|----|----| -|`name`| Required The name of the Helm release.| -|`chartPath`|Required The path to the Helm chart.| -|`valuesFilePath`| The path to the Helm `values` file.| -|`values`| A list of key-value pairs supplementing the Helm `values` file.| -|`namespace`| The Kubernetes namespace.| -|`version`| The version of the chart.| -|`setValues`| A list of key-value pairs; if present, Skaffold will sent `--set` flag to Helm CLI and append all pairs after the flag.| -|`setValueTemplates`| A list of key-value pairs; if present, Skaffold will try to parse the value part of each key-value pair using environment variables in the system, then send `--set` flag to Helm CLI and append all parsed pairs after the flag.| -|`wait`| A boolean value; if `true`, Skaffold will send `--wait` flag to Helm CLI.| -|`recreatePods`| A boolean value; if `true`, Skaffold will send `--recreate-pods` flag to Helm CLI.| -|`overrides`| A list of key-value pairs; if present, Skaffold will build a Helm `values` file that overrides the original and use it to call Helm CLI (`--f` flag).| -|`packaged`|Packages the chart (`helm package`) Includes two fields:
      • `version`: Version of the chart.
      • `appVersion`: Version of the app.
      | |`imageStrategy`|Add image configurations to the Helm `values` file. Includes one of the two following fields:
      • `fqn`: The image configuration uses the syntax `IMAGE-NAME=IMAGE-REPOSITORY:IMAGE-TAG`.
      • `helm`: The image configuration uses the syntax `IMAGE-NAME.repository=IMAGE-REPOSITORY, IMAGE-NAME.tag=IMAGE-TAG`.
      | - -The following `deploy` section, for example, instructs Skaffold to deploy +### Configuration + +To use Helm with Skaffold, add deploy type `helm` to the `deploy` section of `skaffold.yaml`. + +The `helm` type offers the following options: + +{{< schema root="HelmDeploy" >}} + +Each `release` includes the following fields: + +{{< schema root="HelmRelease" >}} + +### Example + +The following `deploy` section instructs Skaffold to deploy artifacts using `helm`: -```yaml -deploy: - helm: - releases: - - name: skaffold-helm - chartPath: skaffold-helm - values: - image: gcr.io/k8s-skaffold/skaffold-helm - # Uncomment the following lines to specify more parameters - # valuesFilePath: YOUR-VALUES-FILE-PATH - # namespace: YOUR-NAMESPACE - # version: YOUR-VERSION - # setValues: - # SOME-KEY: SOME-VALUE - # setValues: - # SOME-KEY: SOME-VALUE-TEMPLATE - # setValueTemplates: - # SOME-KEY: "{{.SOME-ENV-VARIABLE}}" - # wait: true - # recreatePods: true - # overrides: - # SOME-KEY: SOME-VALUE - # SOME-MORE-KEY: - # SOME-KEY: SOME-VALUE - # packaged: - # version: YOUR-VERSION - # appVersion: YOUR-APP-VERSION - # imageStrategy: - # helm: {} - # OR - # fqn: {} -``` +{{% readfile file="samples/deployers/helm.yaml" %}} + +{{< alert title="Note" >}} +helm CLI must be installed on your machine. Skaffold will not +install it. +Also, it has to be installed in a version that's compatible with your cluster. +{{< /alert >}} ## Deploying with kustomize [kustomize](https://github.com/kubernetes-sigs/kustomize) allows Kubernetes developers to customize raw, template-free YAML files for multiple purposes. -Skaffold can work with kustomize by calling its command-line interface. +Skaffold can work with `kustomize` by calling its command-line interface. + +### Configuration To use kustomize with Skaffold, add deploy type `kustomize` to the `deploy` -section of `skaffold.yaml`. The `kustomize` type offers the following options: +section of `skaffold.yaml`. 
+ +The `kustomize` type offers the following options: + +{{< schema root="KustomizeDeploy" >}} -|Option|Description| -|----|----| -|`kustomizePath`| Optional Path to Kustomization files. The default value is `.` (current directory).| -|`flags`| OPTIONAL. Additional flags to pass to `kubectl`. You can specify three types of flags:
      • `global`: flags that apply to every command.
      • `apply`: flags that apply to creation commands.
      • `delete`: flags that apply to deletion commands.
        • | +`flags` section offers the following options: -The following `deploy` section, for example, instructs Skaffold to deploy +{{< schema root="KubectlFlags" >}} + +### Example + +The following `deploy` section instructs Skaffold to deploy artifacts using kustomize: -```yaml -apiVersion: {{< skaffold-version >}} - kind: Config - deploy: - kustomize: - kustomizePath: "." -# The deploy section above is equal to -# apiVersion: {{< skaffold-version >}} -# kind: Config -# deploy: -# kustomize: {} -``` +{{% readfile file="samples/deployers/kustomize.yaml" %}} + +{{< alert title="Note" >}} +kustomize CLI must be installed on your machine. Skaffold will not +install it. +{{< /alert >}} diff --git a/docs/content/en/docs/how-tos/filesync/_index.md b/docs/content/en/docs/how-tos/filesync/_index.md index bd313cc66ea..ac245eef542 100755 --- a/docs/content/en/docs/how-tos/filesync/_index.md +++ b/docs/content/en/docs/how-tos/filesync/_index.md @@ -1,10 +1,9 @@ - --- -title: "Using file sync" -linkTitle: "Using file sync" +title: "File sync" +linkTitle: "File sync" weight: 40 --- -This page discusses how to set up Skaffold to setup file sync for files that don't require full rebuild. - +This page discusses how to set up file sync for files that don't require full rebuild. + {{% todo 1076 %}} diff --git a/docs/content/en/docs/how-tos/portforward/_index.md b/docs/content/en/docs/how-tos/portforward/_index.md index 491cbdd7595..475073bd9de 100755 --- a/docs/content/en/docs/how-tos/portforward/_index.md +++ b/docs/content/en/docs/how-tos/portforward/_index.md @@ -1,10 +1,29 @@ - --- -title: "Using port forwarding" -linkTitle: "Using port forwarding" +title: "Port forwarding" +linkTitle: "Port forwarding" weight: 50 --- -This page discusses how to set up Skaffold to setup port forwarding for container ports from pods. - -{{% todo 1076 %}} +This page discusses how Skaffold sets up port forwarding for container ports from pods. 
When Skaffold deploys an application, it will automatically forward any ports mentioned in the pod spec. + +### Example + +With the following pod manifest, Skaffold will forward port 8000 to port 8000 on our machine: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: example +spec: + containers: + - name: skaffold-example + image: gcr.io/k8s-skaffold/skaffold-example + ports: + - name: web + containerPort: 8000 +``` + +{{< alert title="Note" >}} +If port 8000 isn't available, another random port will be chosen. +{{< /alert >}} diff --git a/docs/content/en/docs/how-tos/profiles/_index.md b/docs/content/en/docs/how-tos/profiles/_index.md index 0fd44a31852..010afedd371 100644 --- a/docs/content/en/docs/how-tos/profiles/_index.md +++ b/docs/content/en/docs/how-tos/profiles/_index.md @@ -1,63 +1,60 @@ - --- -title: "Using profiles" -linkTitle: "Using profiles" +title: "Profiles" +linkTitle: "Profiles" weight: 70 --- -This page discusses Skaffold profiles. +Skaffold profiles allow you to define build, test and deployment +configurations for different contexts. Different contexts are typically different +environments in your app's lifecycle, like Production or Development. -Skaffold profiles allow you to define build and deployment -configurations for different contexts. Different contexts are typically different environments in your app's lifecycle, like Production or Development. +You can create profiles in the `profiles` section of `skaffold.yaml`. -You can create profiles in the `profiles` section of `skaffold.yaml`. For a -detailed discussion on Skaffold configuration, -see [Skaffold Concepts: Configuration](/docs/concepts/#configuration) and -[skaffold.yaml References](/docs/references/config). +For a detailed discussion on Skaffold configuration, see +[Skaffold Concepts](/docs/concepts/#configuration) and +[skaffold.yaml References](/docs/references/yaml). 
## Profiles (`profiles`) -Each profile has three parts: +Each profile has four parts: -* Name (`name`): The name of the profile. +* Name (`name`): The name of the profile * Build configuration (`build`) +* Test configuration (`test`) * Deploy configuration (`deploy`) -Once activated, the specified build and deploy configuration -in the profile will override the `build` and `deploy` section declared -in `skaffold.yaml`. The build and deploy configuration in the `profiles` -section use the same syntax as the `build` and `deploy` section of -`skaffold.yaml`; for more information, see [Using Builders](/docs/how-tos/builders), -[Using Taggers](/docs/how-tos/taggers), and [Using Deployers](/docs/how-tos/deployers). +Once activated, the specified `build`, `test` and `deploy` configuration +in the profile will override the `build`, `test` and `deploy` sections declared +in `skaffold.yaml`. The `build`, `test` and `deploy` configuration in the `profiles` +section use the same syntax as the `build`, `test` and `deploy` sections of +`skaffold.yaml`; for more information, see [Builders](/docs/how-tos/builders), +[Testers](/docs/how-tos/testers), and [Deployers](/docs/how-tos/deployers). + +### Activation -You can activate a profile with the `-p` (`--profile`) parameter in the +You can activate profiles with the `-p` (`--profile`) parameter in the `skaffold dev` and `skaffold run` commands. 
+```bash +skaffold run -p [PROFILE] +``` + +### Example + The following example, showcases a `skaffold.yaml` with one profile, `gcb`, for building with Google Cloud Build: -```yaml -apiVersion: skaffold/v1beta2 -kind: Config -build: - artifacts: - - image: gcr.io/k8s-skaffold/skaffold-example - deploy: - kubectl: - manifests: - - k8s-pod - profiles: - - name: test-env - build: - googleCloudBuild: - projectId: k8s-skaffold -``` +{{% readfile file="samples/profiles/profiles.yaml" %}} With no profile activated, Skaffold will build the artifact `gcr.io/k8s-skaffold/skaffold-example` using local Docker daemon and deploy it -with `kubectl`. However, if you run Skaffold with the following command: +with `kubectl`. -`skaffold dev -p test-env` (or `skaffold run -p test-env`) +However, if you run Skaffold with the following command: + +```bash +skaffold dev -p gcb +``` Skaffold will switch to Google Cloud Build for building artifacts. Note that since the `gcb` profile does not specify a deploy configuration, Skaffold will diff --git a/docs/content/en/docs/how-tos/taggers/_index.md b/docs/content/en/docs/how-tos/taggers/_index.md index 0a966c85404..404be5d83a6 100644 --- a/docs/content/en/docs/how-tos/taggers/_index.md +++ b/docs/content/en/docs/how-tos/taggers/_index.md @@ -1,72 +1,67 @@ - --- -title: "Using taggers" -linkTitle: "Using taggers" -weight: 30 +title: "Taggers" +linkTitle: "Taggers" +weight: 20 --- This page discusses how to set up Skaffold to tag artifacts as you see fit. 
Skaffold supports the following tagging policies: -* Using Git commit IDs as tags (`gitCommit`) -* Using Sha256 hashes of contents as tags (`sha256`) -* Using values of environment variables as tags (`envTemplate`) -* Using date and time values as tags (`dateTime`) +* `gitCommit`: uses Git commit IDs as tags +* `sha256`: uses Sha256 hashes of contents as tags +* `envTemplate`: uses values of environment variables as tags +* `dateTime`: uses date and time values as tags Tag policy is specified in the `tagPolicy` field of the `build` section of the -Skaffold configuration file, `skaffold.yaml`. For a detailed discussion on -Skaffold configuration, -see [Skaffold Concepts: Configuration](/docs/concepts/#configuration) and -[skaffold.yaml References](/docs/references/config). +Skaffold configuration file, `skaffold.yaml`. + +For a detailed discussion on Skaffold configuration, see +[Skaffold Concepts](/docs/concepts/#configuration) and +[skaffold.yaml References](/docs/references/yaml). -## `gitCommit`: using Git commit IDs as tags +## `gitCommit`: uses Git commit IDs as tags `gitCommit` is the default tag policy of Skaffold: if you do not specify the -`tagPolicy` field in the `build` section, Skaffold will tag artifacts with -the Git commit IDs of the repository. +`tagPolicy` field in the `build` section, Skaffold will use Git information +to tag artifacts. 
-The following `build` section, for example, instructs Skaffold to build a +The `gitCommit` tagger will look at the Git workspace that contains +the artifact's `context` directory and tag according to these rules: + + + If the workspace is on a Git tag, that tag is used to tag images + + If the workspace is on a Git commit, the short commit is used + + If the workspace has uncommitted changes, a `-dirty` suffix is appended to the image tag + +### Example + +The following `build` section instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with the `gitCommit` tag policy specified explicitly: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - tagPolicy: - gitCommit: {} - local: {} -``` +{{% readfile file="samples/taggers/git.yaml" %}} + +### Configuration `gitCommit` tag policy features no options. -## `sha256`: using Sha256 hashes of contents as tags +## `sha256`: uses Sha256 hashes of contents as tags `sha256` is a content-based tagging strategy: it uses the Sha256 hash of your built image as the tag of the Docker image. -{{< alert title="Note" >}} - -It is recommended that you use `sha256` tag policy during development, as -it allows Kubernetes to re-deploy images every time your source code changes. -{{< /alert >}} +### Example -The following `build` section, for example, instructs Skaffold to build a +The following `build` section instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with the `sha256` tag policy: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - tagPolicy: - sha256: {} - local: {} -``` +{{% readfile file="samples/taggers/sha256.yaml" %}} + +### Configuration `sha256` tag policy features no options. -## `envTemplate`: using values of environment variables as tags +## `envTemplate`: uses values of environment variables as tags `envTemplate` allows you to use environment variables in tags. 
This policy requires that you specify a tag template, where part of template @@ -87,56 +82,40 @@ image. the artifacts part of the build section. {{< /alert >}} -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - tagPolicy: - envTemplate: - template: "{{.IMAGE_NAME}}:{{.FOO}}" - local: {} -``` +### Example + +{{% readfile file="samples/taggers/envTemplate.yaml" %}} Suppose the value of the `FOO` environment variable is `v1`, the image built will be `gcr.io/k8s-skaffold/example:v1`. +### Configuration + The tag template uses the [Go Programming Language Syntax](https://golang.org/pkg/text/template/). As showcased in the example, `envTemplate` tag policy features one -**required** parameter, `template`, which is the tag template to use. To learn more about templating support in skaffold.yaml see [Using templated fields](/docs/how-tos/templating) +**required** parameter, `template`, which is the tag template to use. To learn more about templating support in skaffold.yaml see [Templated fields](/docs/how-tos/templating) -## `dateTime`: using data and time values as tags +## `dateTime`: uses date and time values as tags `dateTime` uses the time when Skaffold starts building artifacts as the tag. You can choose which format and timezone Skaffold should use. By default, Skaffold uses the time format `2006-01-02_15-04-05.999_MST` and the local timezone. 
+### Example + The following `build` section, for example, instructs Skaffold to build a Docker image `gcr.io/k8s-skaffold/example` with the `dateTime` tag policy: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - tagPolicy: - dateTime: - format: "2006-01-02_15-04-05.999_MST" - timezone: "Local" - local: {} -# The build section above is equal to -# build: -# artifacts: -# - image: gcr.io/k8s-skaffold/example -# tagPolicy: -# dateTime: {} -# local: {} -``` +{{% readfile file="samples/taggers/dateTime.yaml" %}} Suppose current time is `15:04:09.999 January 2nd, 2006` and current time zone is `MST` (`US Mountain Standard Time`), the image built will be `gcr.io/k8s-skaffold/example:2006-01-02_15-04-05.999_MST`. +### Configuration + You can learn more about what time format and time zone you can use in [Go Programming Language Documentation: Time package/Format Function](https://golang.org/pkg/time/#Time.Format) and [Go Programming Language Documentation: Time package/LoadLocation Function](https://golang.org/pkg/time/#LoadLocation) respectively. As showcased in the diff --git a/docs/content/en/docs/how-tos/templating/_index.md b/docs/content/en/docs/how-tos/templating/_index.md index a4745f54658..20ee6e468f8 100755 --- a/docs/content/en/docs/how-tos/templating/_index.md +++ b/docs/content/en/docs/how-tos/templating/_index.md @@ -1,22 +1,13 @@ - --- -title: "Using templated fields" -linkTitle: "Using templated fields" +title: "Templated fields" +linkTitle: "Templated fields" weight: 90 --- Skaffold config allows for certain fields to have values injected that are either environment variables or calculated by Skaffold. For example: -```yaml -build: - artifacts: - - image: gcr.io/k8s-skaffold/example - tagPolicy: - envTemplate: - template: "{{.IMAGE_NAME}}:{{.FOO}}" - local: {} -``` +{{% readfile file="samples/templating/env.yaml" %}} Suppose the value of the `FOO` environment variable is `v1`, the image built will be `gcr.io/k8s-skaffold/example:v1`. 
@@ -28,9 +19,5 @@ List of fields that support templating: List of variables that are available for templating: -* all environment variables passed to the skaffold process as startup -* `IMAGE_NAME` - the artifacts' image name - the [image name rewriting](/docs/concepts/#image-repository-handling) acts after the template was calculated -* `DIGEST` - the image digest calculated by the docker registry after pushing the image -* if `DIGEST` is of format `algo:hex`, `DIGEST_ALGO` and `DIGEST_HEX` parts correspond to the parts of the string otherwise `DIGEST_HEX`=`DIGEST` is set - - +* all environment variables passed to the Skaffold process at startup +* `IMAGE_NAME` - the artifacts' image name - the [image name rewriting](/docs/concepts/#image-repository-handling) acts after the template is calculated diff --git a/docs/content/en/docs/how-tos/testers/_index.md b/docs/content/en/docs/how-tos/testers/_index.md index 7ad0843bac2..50bedd75f53 100755 --- a/docs/content/en/docs/how-tos/testers/_index.md +++ b/docs/content/en/docs/how-tos/testers/_index.md @@ -1,8 +1,7 @@ - --- -title: "Using testers" -linkTitle: "Using testers" -weight: 80 +title: "Testers" +linkTitle: "Testers" +weight: 15 --- This page discusses how to set up Skaffold to run container structure tests after building an artifact. 
diff --git a/docs/content/en/docs/references/_index.md b/docs/content/en/docs/references/_index.md index 43afea6ef5b..9f3f2714496 100755 --- a/docs/content/en/docs/references/_index.md +++ b/docs/content/en/docs/references/_index.md @@ -1,4 +1,3 @@ - --- title: "References" linkTitle: "References" @@ -7,6 +6,6 @@ weight: 100 | Skaffold References | |----------| -| [CLI References](/docs/references/cli) | -| [skaffold.yaml References](https://github.com/GoogleContainerTools/skaffold/blob/master/examples/annotated-skaffold.yaml) | +| [CLI](/docs/references/cli) | +| [skaffold.yaml](/docs/references/yaml) | diff --git a/docs/content/en/docs/references/cli/_index.md b/docs/content/en/docs/references/cli/_index.md index b0369e3bfce..e8937e02f60 100755 --- a/docs/content/en/docs/references/cli/_index.md +++ b/docs/content/en/docs/references/cli/_index.md @@ -1,6 +1,6 @@ --- -title: "CLI References" -linkTitle: "CLI References" +title: "CLI" +linkTitle: "CLI" weight: 110 --- @@ -20,7 +20,7 @@ Pipeline building blocks for CI/CD: Getting started with a new project: -* [skaffold init](#skaffold-init) - to bootstrap skaffold.yaml +* [skaffold init](#skaffold-init) - to bootstrap Skaffold config * [skaffold fix](#skaffold-fix) - to upgrade from Utilities: @@ -29,7 +29,7 @@ Utilities: * [skaffold version](#skaffold-version) - get Skaffold version * [skaffold completion](#skaffold-completion) - setup tab completion for the CLI * [skaffold config](#skaffold-config) - manage context specific parameters -* [skaffold diagnose](#skaffold-diagnose) - diagnostics of skaffold works in your project +* [skaffold diagnose](#skaffold-diagnose) - diagnostics of Skaffold works in your project ## Global flags @@ -43,7 +43,7 @@ Utilities: | Flag | Description | |------- |---------------| -|`SKAFFOLD_UPDATE_CHECK`|Enables checking for latest version of the skaffold binary. By default it's `true`. | +|`SKAFFOLD_UPDATE_CHECK`|Enables checking for latest version of the Skaffold binary. 
By default it's `true`. | ## Skaffold commands @@ -63,6 +63,7 @@ Usage: skaffold build [flags] Flags: + -b, --build-image stringArray Choose which artifacts to build. Artifacts with image names that contain the expression will be built only. Default is to build sources for all artifacts -d, --default-repo string Default repository value (overrides global config) -f, --filename string Filename or URL to the pipeline file (default "skaffold.yaml") -n, --namespace string Run deployments in the specified namespace @@ -74,12 +75,14 @@ Flags: --toot Emit a terminal beep after the deploy is complete Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") ``` Env vars: +* `SKAFFOLD_BUILD_IMAGE` (same as --build-image) * `SKAFFOLD_DEFAULT_REPO` (same as --default-repo) * `SKAFFOLD_FILENAME` (same as --filename) * `SKAFFOLD_NAMESPACE` (same as --namespace) @@ -91,13 +94,14 @@ Env vars: ### skaffold completion -Output command completion script for the bash shell +Output shell completion for the given shell (bash or zsh) ``` Usage: - skaffold completion bash [flags] + skaffold completion SHELL [flags] Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -107,18 +111,19 @@ Env vars: ### skaffold config -A set of commands for interacting with the skaffold config. +A set of commands for interacting with the Skaffold config. 
``` Usage: skaffold config [command] Available Commands: - list List all values set in the global skaffold config - set Set a value in the global skaffold config - unset Unset a value in the global skaffold config + list List all values set in the global Skaffold config + set Set a value in the global Skaffold config + unset Unset a value in the global Skaffold config Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") Use "skaffold config [command] --help" for more information about a command. @@ -145,6 +150,7 @@ Flags: --toot Emit a terminal beep after the deploy is complete Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -178,6 +184,7 @@ Flags: --toot Emit a terminal beep after the deploy is complete Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -205,6 +212,7 @@ Usage: Flags: --cleanup Delete deployments after dev mode is interrupted (default true) -d, --default-repo string Default repository value (overrides global config) + --experimental-gui Experimental Graphical User Interface -f, --filename string Filename or URL to the pipeline file (default "skaffold.yaml") -l, --label stringArray Add custom labels to deployed objects. 
Set multiple times for multiple labels -n, --namespace string Run deployments in the specified namespace @@ -218,6 +226,7 @@ Flags: -i, --watch-poll-interval int Interval (in ms) between two checks for file changes (default 1000) Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -226,6 +235,7 @@ Env vars: * `SKAFFOLD_CLEANUP` (same as --cleanup) * `SKAFFOLD_DEFAULT_REPO` (same as --default-repo) +* `SKAFFOLD_EXPERIMENTAL_GUI` (same as --experimental-gui) * `SKAFFOLD_FILENAME` (same as --filename) * `SKAFFOLD_LABEL` (same as --label) * `SKAFFOLD_NAMESPACE` (same as --namespace) @@ -250,6 +260,7 @@ Flags: -f, --filename string Filename or URL to the pipeline file (default "skaffold.yaml") Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -260,7 +271,7 @@ Env vars: ### skaffold fix -Converts old skaffold.yaml to newest schema version +Converts old Skaffold config to newest schema version ``` Usage: @@ -271,6 +282,7 @@ Flags: --overwrite Overwrite original config with fixed config Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -282,7 +294,7 @@ Env vars: ### skaffold init -Automatically generate skaffold configuration for deploying an application +Automatically generate Skaffold configuration for deploying an application ``` Usage: @@ -293,10 +305,11 @@ Flags: (example: --artifact=/web/Dockerfile.web=gcr.io/web-project/image) --compose-file string Initialize from a docker-compose file -f, --filename string Filename or URL to the pipeline file (default "skaffold.yaml") - --force Force the generation of the skaffold config - --skip-build Skip 
generating build artifacts in skaffold config + --force Force the generation of the Skaffold config + --skip-build Skip generating build artifacts in Skaffold config Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -329,6 +342,7 @@ Flags: --toot Emit a terminal beep after the deploy is complete Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") @@ -358,6 +372,7 @@ Flags: ) Global Flags: + --color int Specify the default output color in ANSI escape codes (default 34) -v, --verbosity string Log level (debug, info, warn, error, fatal, panic) (default "warning") diff --git a/docs/content/en/docs/references/cli/index_header b/docs/content/en/docs/references/cli/index_header index 0e7cb4b3b79..11102331b19 100644 --- a/docs/content/en/docs/references/cli/index_header +++ b/docs/content/en/docs/references/cli/index_header @@ -1,6 +1,6 @@ --- -title: "CLI References" -linkTitle: "CLI References" +title: "CLI" +linkTitle: "CLI" weight: 110 --- @@ -20,7 +20,7 @@ Pipeline building blocks for CI/CD: Getting started with a new project: -* [skaffold init](#skaffold-init) - to bootstrap skaffold.yaml +* [skaffold init](#skaffold-init) - to bootstrap Skaffold config * [skaffold fix](#skaffold-fix) - to upgrade from Utilities: @@ -29,7 +29,7 @@ Utilities: * [skaffold version](#skaffold-version) - get Skaffold version * [skaffold completion](#skaffold-completion) - setup tab completion for the CLI * [skaffold config](#skaffold-config) - manage context specific parameters -* [skaffold diagnose](#skaffold-diagnose) - diagnostics of skaffold works in your project +* [skaffold diagnose](#skaffold-diagnose) - diagnostics of Skaffold works in your project ## Global flags @@ -43,7 +43,7 @@ Utilities: | Flag | 
Description | |------- |---------------| -|`SKAFFOLD_UPDATE_CHECK`|Enables checking for latest version of the skaffold binary. By default it's `true`. | +|`SKAFFOLD_UPDATE_CHECK`|Enables checking for latest version of the Skaffold binary. By default it's `true`. | ## Skaffold commands diff --git a/docs/content/en/docs/references/yaml/_index.html b/docs/content/en/docs/references/yaml/_index.html new file mode 100755 index 00000000000..67d6e3325ad --- /dev/null +++ b/docs/content/en/docs/references/yaml/_index.html @@ -0,0 +1,10 @@ +--- +title: "skaffold.yaml" +linkTitle: "skaffold.yaml" +weight: 120 +--- + + + + +
          diff --git a/docs/content/en/docs/references/yaml/main.css b/docs/content/en/docs/references/yaml/main.css new file mode 100644 index 00000000000..31052b09a22 --- /dev/null +++ b/docs/content/en/docs/references/yaml/main.css @@ -0,0 +1,67 @@ +.comment { + color: #008000; +} + +.key { + color: #811f3f; +} + +.key.required { + font-weight: bold; +} + +.value { + color: blue; +} + +.example { + color: gray; + font-weight: 100; +} + +.key, .value, .comment, .example { + font-family: 'Roboto Mono', monospace; + font-size: 14px; +} + +.key, .value, .example { + white-space: nowrap; +} + +table { + border-collapse: collapse; +} + +td { + border-bottom: 1px solid #EEE; + padding: 1px 0 !important; +} + +td:first-child { + padding: 1px 5px !important; +} + +td.top { + background-color: #EEE; +} + +td.top .comment { + color: gray; +} + +td:first-child { + white-space: nowrap; + padding-right: 10px; +} + +tr:hover td { + background: #DDD; +} + +code { + font-family: unset; + font-size: 85%; + border: 1px dotted gray; + border-radius: 3px; + padding: 0 .2em; +} diff --git a/docs/content/en/docs/references/yaml/main.js b/docs/content/en/docs/references/yaml/main.js new file mode 100644 index 00000000000..646ea9311af --- /dev/null +++ b/docs/content/en/docs/references/yaml/main.js @@ -0,0 +1,166 @@ +import { html, render } from "https://unpkg.com/lit-html@1.0.0/lit-html.js"; +import { unsafeHTML } from "https://unpkg.com/lit-html@1.0.0/directives/unsafe-html.js"; + +var version; +(async function() { + let l = new URL(import.meta.url); + version = l.hash.replace('#skaffold/', ''); + + const response = await fetch(`/schemas/${version}.json`); + const json = await response.json(); + + render(html`${template(json.definitions, undefined, '#/definitions/SkaffoldPipeline', 0)}`, document.getElementById("table")); +})(); + +function* template(definitions, parentDefinition, ref, ident) { + const name = ref.replace('#/definitions/', ''); + + let allProperties = []; + var 
properties = definitions[name].properties; + for (var key in properties) { + allProperties.push([key, properties[key]]); + } + if (definitions[name].anyOf) { + for (var properties of definitions[name].anyOf) { + for (var key in properties.properties) { + allProperties.push([key, properties.properties[key]]); + } + } + } + + let index = -1 + for (var [key, definition] of allProperties) { + var desc = definition.description; + let value = definition.default; + index++; + + if (key === 'apiVersion') { + value = `skaffold/${version}` + } + if (definition.examples) { + value = definition.examples[0] + } + let valueClass = definition.examples ? 'example' : 'value'; + + let required = false; + if (definitions[name].required) { + for (var requiredName of definitions[name].required) { + if (requiredName === key) { + required = true; + break; + } + } + } + let keyClass = required ? 'key required' : 'key'; + + // Special case for profiles + if (name === 'Profile') { + if ((key === 'build') || (key === 'test') || (key === 'deploy')) { + yield html` + + ${key}: {} + # + ${unsafeHTML(desc)} + + `; + continue + } + } + + if (definition.$ref) { + // Check if the referenced description is a final one + const refName = definition.$ref.replace('#/definitions/', ''); + if (!definitions[refName].properties && !definitions[refName].anyOf) { + value = '{}' + } + + yield html` + + ${key}: ${value} + + ${unsafeHTML(desc)} + + `; + } else if (definition.items && definition.items.$ref) { + yield html` + + ${key}: ${value} + + ${unsafeHTML(desc)} + + `; + } else if (parentDefinition && parentDefinition.type === 'array' && (index == 0)) { + yield html` + + - ${key}: ${value} + + ${unsafeHTML(desc)} + + `; + } else if ((definition.type === 'array') && value && (value != '[]')) { + // Parse value to json array + let values = JSON.parse(value); + + yield html` + + ${key}: + + ${unsafeHTML(desc)} + + `; + + for (var v of values) { + yield html` + + - ${v} + + + `; + } + } else if ((definition.type 
=== 'object') && value && (value != '{}')) { + // Parse value to json object + let values = JSON.parse(value); + + yield html` + + ${key}: + + ${unsafeHTML(desc)} + + `; + + for (var k in values) { + let v = values[k]; + + yield html` + + ${k}: ${v} + + + `; + } + } else { + yield html` + + ${key}: ${value} + + ${unsafeHTML(desc)} + + `; + } + + // This definition references another definition + if (definition.$ref) { + yield html` + ${template(definitions, definition, definition.$ref, ident + 1)} + `; + } + + // This definition is an array + if (definition.items && definition.items.$ref) { + yield html` + ${template(definitions, definition, definition.items.$ref, ident + 1)} + `; + } + } +} diff --git a/docs/content/en/docs/resources/_index.md b/docs/content/en/docs/resources/_index.md index 123ed497a50..5611e14e948 100755 --- a/docs/content/en/docs/resources/_index.md +++ b/docs/content/en/docs/resources/_index.md @@ -1,13 +1,25 @@ - --- title: "Resources" linkTitle: "Resources" weight: 130 --- -## Roadmap +## 2019 Roadmap + +* Plugin model for builders +* IDE integration - VSCode and IntelliJ Skaffold dev/build/run/deploy support, Skaffold Config code completion +* Debugging JVM applications +* Provide help with integration testing +* Skaffold keeps track of what it built, for faster restarts +* Automated kubernetes manifest generation +* Pipeline CRD integration +* Infrastructure scaffolding for CI/CD on GCP/GKE +* Document end-to-end solutions +* Status dashboard for build (test) and deployment besides logging -{{% todo 134 %}} +{{< alert title="Note" >}} +The roadmap is subject to change and aspirational but we would like to share our plans with the user and contributor community. +{{< /alert >}} ## Contributing @@ -16,7 +28,7 @@ See [Contributing Guide](https://github.com/GoogleContainerTools/skaffold/blob/m and our [Code of Conduct](https://github.com/GoogleContainerTools/skaffold/blob/master/code-of-conduct.md) on GitHub. 
-## Release notes +## Release Notes See [Release Notes](https://github.com/GoogleContainerTools/skaffold/blob/master/CHANGELOG.md) on Github. @@ -35,4 +47,4 @@ Calendar. ## FAQ -## Troubleshooting \ No newline at end of file +## Troubleshooting diff --git a/docs/content/en/docs/tutorials/_index.md b/docs/content/en/docs/tutorials/_index.md index e5c2589a2a2..a084fa95dab 100755 --- a/docs/content/en/docs/tutorials/_index.md +++ b/docs/content/en/docs/tutorials/_index.md @@ -1,4 +1,3 @@ - --- title: "Tutorials" linkTitle: "Tutorials" @@ -6,3 +5,12 @@ weight: 90 --- See the [Github Examples page](https://github.com/GoogleContainerTools/skaffold/tree/master/examples) for examples. + +As we have gcr.io/k8s-skaffold in our image names, to run the examples, you have two options: + +1. manually replace the image repositories in skaffold.yaml from gcr.io/k8s-skaffold to yours +1. you can point skaffold to your default image repository in one of the four ways: + 1. flag: `skaffold dev --default-repo ` + 1. env var: `SKAFFOLD_DEFAULT_REPO= skaffold dev` + 1. global skaffold config (one time): `skaffold config set --global default-repo ` + 1. 
skaffold config for current kubectl context: `skaffold config set default-repo ` diff --git a/docs/content/en/samples/builders/bazel.yaml b/docs/content/en/samples/builders/bazel.yaml new file mode 100644 index 00000000000..e5a30c67bea --- /dev/null +++ b/docs/content/en/samples/builders/bazel.yaml @@ -0,0 +1,5 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/example + bazel: + target: //:example.tar diff --git a/docs/content/en/samples/builders/gcb.yaml b/docs/content/en/samples/builders/gcb.yaml new file mode 100644 index 00000000000..15f0dd55556 --- /dev/null +++ b/docs/content/en/samples/builders/gcb.yaml @@ -0,0 +1,5 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/example + googleCloudBuild: + projectId: YOUR-GCP-PROJECT diff --git a/docs/content/en/samples/builders/kaniko.yaml b/docs/content/en/samples/builders/kaniko.yaml new file mode 100644 index 00000000000..88fbfa75c3e --- /dev/null +++ b/docs/content/en/samples/builders/kaniko.yaml @@ -0,0 +1,6 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/example + kaniko: + buildContext: + gcsBucket: YOUR-BUCKET \ No newline at end of file diff --git a/docs/content/en/samples/builders/local-full.yaml b/docs/content/en/samples/builders/local-full.yaml new file mode 100644 index 00000000000..d6ab64d3a36 --- /dev/null +++ b/docs/content/en/samples/builders/local-full.yaml @@ -0,0 +1,6 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/example + local: + useDockerCLI: false + useBuildkit: false diff --git a/docs/content/en/samples/builders/local.yaml b/docs/content/en/samples/builders/local.yaml new file mode 100644 index 00000000000..30e684ab629 --- /dev/null +++ b/docs/content/en/samples/builders/local.yaml @@ -0,0 +1,4 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/example + local: {} diff --git a/docs/content/en/samples/deployers/helm.yaml b/docs/content/en/samples/deployers/helm.yaml new file mode 100644 index 00000000000..f041e56c8ec --- /dev/null +++ 
b/docs/content/en/samples/deployers/helm.yaml @@ -0,0 +1,7 @@ +deploy: + helm: + releases: + - name: skaffold-helm + chartPath: skaffold-helm + values: + image: gcr.io/k8s-skaffold/skaffold-helm diff --git a/docs/content/en/samples/deployers/kubectl.yaml b/docs/content/en/samples/deployers/kubectl.yaml new file mode 100644 index 00000000000..1fd9199536f --- /dev/null +++ b/docs/content/en/samples/deployers/kubectl.yaml @@ -0,0 +1,4 @@ +deploy: + kubectl: + manifests: + - k8s-* diff --git a/docs/content/en/samples/deployers/kustomize.yaml b/docs/content/en/samples/deployers/kustomize.yaml new file mode 100644 index 00000000000..d3d39248519 --- /dev/null +++ b/docs/content/en/samples/deployers/kustomize.yaml @@ -0,0 +1,6 @@ +deploy: + kustomize: {} +# The deploy section above is equal to +# deploy: +# kustomize: +# path: "." diff --git a/docs/content/en/samples/profiles/profiles.yaml b/docs/content/en/samples/profiles/profiles.yaml new file mode 100644 index 00000000000..9017e0ee89e --- /dev/null +++ b/docs/content/en/samples/profiles/profiles.yaml @@ -0,0 +1,12 @@ +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +deploy: + kubectl: + manifests: + - k8s-pod +profiles: +- name: gcb + build: + googleCloudBuild: + projectId: k8s-skaffold diff --git a/docs/content/en/samples/taggers/dateTime.yaml b/docs/content/en/samples/taggers/dateTime.yaml new file mode 100644 index 00000000000..49af2dd2c8b --- /dev/null +++ b/docs/content/en/samples/taggers/dateTime.yaml @@ -0,0 +1,10 @@ +build: + tagPolicy: + dateTime: + format: "2006-01-02_15-04-05.999_MST" + timezone: "Local" + artifacts: + - image: gcr.io/k8s-skaffold/example +# The tagPolicy section above is equal to +# tagPolicy: +# dateTime: {} diff --git a/docs/content/en/samples/taggers/envTemplate.yaml b/docs/content/en/samples/taggers/envTemplate.yaml new file mode 100644 index 00000000000..45ed68ce876 --- /dev/null +++ b/docs/content/en/samples/taggers/envTemplate.yaml @@ -0,0 +1,6 @@ +build: + 
tagPolicy: + envTemplate: + template: "{{.IMAGE_NAME}}:{{.FOO}}" + artifacts: + - image: gcr.io/k8s-skaffold/example diff --git a/docs/content/en/samples/taggers/git.yaml b/docs/content/en/samples/taggers/git.yaml new file mode 100644 index 00000000000..f0f3a00b5f5 --- /dev/null +++ b/docs/content/en/samples/taggers/git.yaml @@ -0,0 +1,5 @@ +build: + tagPolicy: + gitCommit: {} + artifacts: + - image: gcr.io/k8s-skaffold/example diff --git a/docs/content/en/samples/taggers/sha256.yaml b/docs/content/en/samples/taggers/sha256.yaml new file mode 100644 index 00000000000..a0c21fe1977 --- /dev/null +++ b/docs/content/en/samples/taggers/sha256.yaml @@ -0,0 +1,5 @@ +build: + tagPolicy: + sha256: {} + artifacts: + - image: gcr.io/k8s-skaffold/example diff --git a/docs/content/en/samples/templating/env.yaml b/docs/content/en/samples/templating/env.yaml new file mode 100644 index 00000000000..45ed68ce876 --- /dev/null +++ b/docs/content/en/samples/templating/env.yaml @@ -0,0 +1,6 @@ +build: + tagPolicy: + envTemplate: + template: "{{.IMAGE_NAME}}:{{.FOO}}" + artifacts: + - image: gcr.io/k8s-skaffold/example diff --git a/docs/content/en/schemas/v1alpha1.json b/docs/content/en/schemas/v1alpha1.json new file mode 100755 index 00000000000..29518d1859f --- /dev/null +++ b/docs/content/en/schemas/v1alpha1.json @@ -0,0 +1,216 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "top level config object that is parsed from a skaffold.yaml" + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "type": "string" + } + }, + 
"additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "skipPush": { + "type": "boolean" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + } + }, + "additionalProperties": false + }, + "DeployConfig": { + "properties": { + "name": { + "type": "string" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." 
+ }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "$ref": "#/definitions/Manifest" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "Manifest": { + "properties": { + "paths": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false + }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFilePath": { + "type": "string" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "additionalProperties": false + }, + "Artifact": { + "properties": { + "imageName": { + "type": "string" + }, + "dockerfilePath": { + "type": "string" + }, + "workspace": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "description": "represents items that need should be built, along with the context in which they should be built." 
+ } + } +} diff --git a/docs/content/en/schemas/v1alpha2.json b/docs/content/en/schemas/v1alpha2.json new file mode 100755 index 00000000000..62551eaa58b --- /dev/null +++ b/docs/content/en/schemas/v1alpha2.json @@ -0,0 +1,511 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." 
+ }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "skipPush": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." 
+ }, + "KanikoBuild": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." 
+ }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." + }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "kustomizePath": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFilePath": { + "type": "string" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + 
"type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." + }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "imageName": { + "type": "string" + }, + "workspace": { + "type": "string" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the 
context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." + }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfilePath": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1alpha3.json b/docs/content/en/schemas/v1alpha3.json new file mode 100755 index 00000000000..ab3dc28a44b --- /dev/null +++ b/docs/content/en/schemas/v1alpha3.json @@ -0,0 +1,524 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": 
[ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." 
+ }, + "LocalBuild": { + "properties": { + "skipPush": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": 
"#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." 
+ }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "kustomizePath": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "imageName": { + "type": "string" + }, + "workspace": { + "type": "string" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." 
+ }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfilePath": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1alpha4.json b/docs/content/en/schemas/v1alpha4.json new file mode 100755 index 00000000000..83ca54de0f1 --- /dev/null +++ b/docs/content/en/schemas/v1alpha4.json @@ -0,0 +1,617 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "TestConfig": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + 
}, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." 
+ }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." + }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an image." 
+ }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." 
+ }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." 
+ }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "type": "array" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." + }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." 
+ }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1alpha5.json b/docs/content/en/schemas/v1alpha5.json new file mode 100755 index 00000000000..5b0da351305 --- /dev/null +++ b/docs/content/en/schemas/v1alpha5.json @@ -0,0 +1,642 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + }, + { + "properties": { + "acr": { + "$ref": "#/definitions/AzureContainerBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": 
"#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + }, + "acr": { + "$ref": "#/definitions/AzureContainerBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." 
+ }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." + }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "AzureContainerBuild": { + "properties": { + "subscriptionId": { + "type": "string" + }, + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "tenantId": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on Azure Container Registry" + }, + "TestConfig": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an 
image." + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." 
+ }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." 
+ }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." + }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1beta1.json b/docs/content/en/schemas/v1beta1.json new file mode 100755 index 00000000000..a569cae2d4d --- /dev/null +++ b/docs/content/en/schemas/v1beta1.json @@ -0,0 +1,633 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": 
"#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." 
+ }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." 
+ }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoCache": { + "properties": { + "repo": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains fields related to kaniko caching" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "cache": { + "$ref": "#/definitions/KanikoCache" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "TestConfig": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an image." 
+ }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." 
+ }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." 
+ }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." 
+ }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1beta2.json b/docs/content/en/schemas/v1beta2.json new file mode 100755 index 00000000000..087bde778b7 --- /dev/null +++ b/docs/content/en/schemas/v1beta2.json @@ -0,0 +1,640 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + 
"additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." 
+ }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoCache": { + "properties": { + "repo": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains fields related to kaniko caching" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "cache": { + "$ref": "#/definitions/KanikoCache" + }, + "flags": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "TestConfig": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an image." 
+ }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." 
+ }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." + }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." 
+ }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." 
+ }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1beta3.json b/docs/content/en/schemas/v1beta3.json new file mode 100755 index 00000000000..057bb166e81 --- /dev/null +++ b/docs/content/en/schemas/v1beta3.json @@ -0,0 +1,661 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + 
"additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." 
+ }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + }, + "mavenImage": { + "type": "string" + }, + "gradleImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." + }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoCache": { + "properties": { + "repo": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains fields related to kaniko caching" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "cache": { + "$ref": "#/definitions/KanikoCache" + }, + "flags": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + }, + "dockerConfig": { + "$ref": "#/definitions/DockerConfig" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "DockerConfig": { + "properties": { + "path": { + "type": "string" + }, + "secretName": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains information about the docker config.json to mount" + }, + "TestConfig": { + "items": { + "$ref": 
"#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an image." + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." 
+ }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." + }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." 
+ }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." 
+ }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." 
+ }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1beta4.json b/docs/content/en/schemas/v1beta4.json new file mode 100755 index 00000000000..44db92990b8 --- /dev/null +++ b/docs/content/en/schemas/v1beta4.json @@ -0,0 +1,687 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array" + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array" + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + } + } + ], + "description": "contains all the configuration for the build steps" + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger" + }, + "sha256": { + "$ref": "#/definitions/ShaTagger" + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger" + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger" + } + }, + 
"additionalProperties": false, + "description": "contains all the configuration for the tagging step" + }, + "ShaTagger": { + "additionalProperties": false, + "description": "contains the configuration for the SHA tagger." + }, + "GitTagger": { + "additionalProperties": false, + "description": "contains the configuration for the git tagger." + }, + "EnvTemplateTagger": { + "properties": { + "template": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the envTemplate tagger." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the configuration for the DateTime tagger." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild" + }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild" + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean" + }, + "useDockerCLI": { + "type": "boolean", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "default": "false" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a build on the local docker daemon and optionally push to a repository." 
+ }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string" + }, + "diskSizeGb": { + "type": "number" + }, + "machineType": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "dockerImage": { + "type": "string" + }, + "mavenImage": { + "type": "string" + }, + "gradleImage": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a remote build on Google Cloud Build." + }, + "LocalDir": { + "additionalProperties": false, + "description": "represents the local directory kaniko build context" + }, + "KanikoBuildContext": { + "properties": { + "gcsBucket": { + "type": "string" + }, + "localDir": { + "$ref": "#/definitions/LocalDir" + } + }, + "additionalProperties": false, + "description": "contains the different fields available to specify a kaniko build context" + }, + "KanikoCache": { + "properties": { + "repo": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains fields related to kaniko caching" + }, + "KanikoBuild": { + "properties": { + "buildContext": { + "$ref": "#/definitions/KanikoBuildContext" + }, + "cache": { + "$ref": "#/definitions/KanikoCache" + }, + "flags": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "pullSecret": { + "type": "string" + }, + "pullSecretName": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "timeout": { + "type": "string" + }, + "image": { + "type": "string" + }, + "dockerConfig": { + "$ref": "#/definitions/DockerConfig" + } + }, + "additionalProperties": false, + "description": "contains the fields needed to do a on-cluster build using the kaniko image" + }, + "DockerConfig": { + "properties": { + "path": { + "type": "string" + }, + "secretName": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "contains information about the docker config.json to mount" + }, + "TestConfig": { + "items": { + "$ref": 
"#/definitions/TestCase" + }, + "additionalProperties": false, + "type": "array" + }, + "TestCase": { + "properties": { + "image": { + "type": "string" + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "a struct containing all the specified test configuration for an image." + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps" + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy" + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy" + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy" + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." 
+ }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kubectl apply" + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes additional options flags that are passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." + }, + "HelmDeploy": { + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with helm" + }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags" + } + }, + "additionalProperties": false, + "description": "contains the configuration needed for deploying with kustomize." 
+ }, + "HelmRelease": { + "properties": { + "name": { + "type": "string" + }, + "chartPath": { + "type": "string" + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "namespace": { + "type": "string" + }, + "version": { + "type": "string" + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "wait": { + "type": "boolean", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "default": "false" + }, + "skipBuildDependencies": { + "type": "boolean", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged" + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy" + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "set the appVersion on the chart to this version" + } + }, + "additionalProperties": false, + "description": "represents parameters for packaging helm chart." 
+ }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + } + } + ] + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig" + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig" + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "represents image config to use the FullyQualifiedImageName as param to set" + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "represents image config in the syntax of image.repository and image.tag" + }, + "Artifact": { + "properties": { + "image": { + "type": "string" + }, + "context": { + "type": "string" + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact" + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + } + } + ], + "description": "represents items that need to be built, along with the context in which they should be built." 
+ }, + "Profile": { + "properties": { + "name": { + "type": "string" + }, + "build": { + "$ref": "#/definitions/BuildConfig" + }, + "test": { + "$ref": "#/definitions/TestConfig" + }, + "deploy": { + "$ref": "#/definitions/DeployConfig" + }, + "patches": {}, + "activation": { + "items": { + "$ref": "#/definitions/Activation" + }, + "type": "array" + } + }, + "additionalProperties": false, + "description": "additional configuration that overrides default configuration when it is activated." + }, + "Activation": { + "properties": { + "env": { + "type": "string" + }, + "kubeContext": { + "type": "string" + }, + "command": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "defines criteria to auto-activate a profile." + }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact" + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact" + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact" + }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact" + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string" + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "default": "{}" + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + }, + "target": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "properties": { + "target": { + "type": "string" + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "describes an artifact built with Bazel." 
+ }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "Only multi-module" + }, + "profile": { + "type": "string" + } + }, + "additionalProperties": false + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "Only multi-module" + } + }, + "additionalProperties": false + } + } +} diff --git a/docs/content/en/schemas/v1beta5.json b/docs/content/en/schemas/v1beta5.json new file mode 100755 index 00000000000..881509c2505 --- /dev/null +++ b/docs/content/en/schemas/v1beta5.json @@ -0,0 +1,967 @@ +{ + "type": "object", + "anyOf": [ + { + "$ref": "#/definitions/SkaffoldPipeline" + } + ], + "definitions": { + "SkaffoldPipeline": { + "properties": { + "apiVersion": { + "type": "string", + "description": "version of the configuration." + }, + "kind": { + "type": "string", + "description": "always Config.", + "default": "Config" + }, + "build": { + "$ref": "#/definitions/BuildConfig", + "description": "describes how images are built." + }, + "test": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "type": "array", + "description": "describes how images are tested." + }, + "deploy": { + "$ref": "#/definitions/DeployConfig", + "description": "describes how images are deployed." + }, + "profiles": { + "items": { + "$ref": "#/definitions/Profile" + }, + "type": "array", + "description": "(beta) can override be used to build, test or deploy configuration." + } + }, + "additionalProperties": false + }, + "BuildConfig": { + "properties": { + "artifacts": { + "items": { + "$ref": "#/definitions/Artifact" + }, + "type": "array", + "description": "the images you're going to be building." + }, + "tagPolicy": { + "$ref": "#/definitions/TagPolicy", + "description": "(beta) determines how images are tagged. A few strategies are provided here, although you most likely won't need to care! If not specified, it defaults to gitCommit: {}." 
+ }, + "executionEnvironment": { + "$ref": "#/definitions/ExecutionEnvironment", + "description": "environment in which the build should run. Possible values: googleCloudBuild." + } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild", + "description": "(beta) describes how to do a build on the local docker daemon and optionally push to a repository." + } + } + }, + { + "properties": { + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild", + "description": "(beta) describes how to do a remote build on Google Cloud Build." + } + } + }, + { + "properties": { + "kaniko": { + "$ref": "#/definitions/KanikoBuild", + "description": "(beta) describes how to do an on-cluster build using Kaniko." + } + } + } + ], + "description": "contains all the configuration for the build steps." + }, + "ExecEnvironment": { + "additionalProperties": false, + "type": "string" + }, + "ExecutionEnvironment": { + "properties": { + "name": { + "$ref": "#/definitions/ExecEnvironment", + "description": "name of the environment." + }, + "properties": { + "additionalProperties": {}, + "type": "object", + "description": "key-value pairs passed to the environment.", + "default": "{}" + } + }, + "additionalProperties": false, + "description": "environment in which the build should run (ex. local or in-cluster, etc.)." + }, + "BuilderPlugin": { + "properties": { + "name": { + "type": "string", + "description": "name of the build plugin." + }, + "properties": { + "additionalProperties": {}, + "type": "object", + "description": "key-value pairs passed to the plugin.", + "default": "{}" + } + }, + "additionalProperties": false, + "description": "contains all fields necessary for specifying a build plugin." + }, + "TagPolicy": { + "properties": { + "gitCommit": { + "$ref": "#/definitions/GitTagger", + "description": "(beta) tags images with the git tag or commit of the artifact's workspace." 
+ }, + "sha256": { + "$ref": "#/definitions/ShaTagger", + "description": "(beta) tags images with their sha256 digest." + }, + "envTemplate": { + "$ref": "#/definitions/EnvTemplateTagger", + "description": "(beta) tags images with a configurable template string." + }, + "dateTime": { + "$ref": "#/definitions/DateTimeTagger", + "description": "(beta) tags images with the build timestamp." + } + }, + "additionalProperties": false, + "description": "contains all the configuration for the tagging step." + }, + "ShaTagger": { + "additionalProperties": false, + "description": "(beta) tags images with their sha256 digest." + }, + "GitTagger": { + "additionalProperties": false, + "description": "(beta) tags images with the git tag or commit of the artifact's workspace." + }, + "EnvTemplateTagger": { + "required": [ + "template" + ], + "properties": { + "template": { + "type": "string", + "description": "used to produce the image name and tag. See golang text/template. The template is executed against the current environment, with those variables injected: IMAGE_NAME | Name of the image being built, as supplied in the artifacts section.", + "examples": [ + "{{.RELEASE}}-{{.IMAGE_NAME}}" + ] + } + }, + "additionalProperties": false, + "description": "(beta) tags images with a configurable template string." + }, + "DateTimeTagger": { + "properties": { + "format": { + "type": "string", + "description": "formats the date and time. See #Time.Format.", + "default": "2006-01-02_15-04-05.999_MST" + }, + "timezone": { + "type": "string", + "description": "sets the timezone for the date and time. See Time.LoadLocation. Defaults to the local timezone." + } + }, + "additionalProperties": false, + "description": "(beta) tags images with the build timestamp." + }, + "BuildType": { + "properties": { + "local": { + "$ref": "#/definitions/LocalBuild", + "description": "(beta) describes how to do a build on the local docker daemon and optionally push to a repository." 
+ }, + "googleCloudBuild": { + "$ref": "#/definitions/GoogleCloudBuild", + "description": "(beta) describes how to do a remote build on Google Cloud Build." + }, + "kaniko": { + "$ref": "#/definitions/KanikoBuild", + "description": "(beta) describes how to do an on-cluster build using Kaniko." + } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the build step. Only one field should be populated." + }, + "LocalBuild": { + "properties": { + "push": { + "type": "boolean", + "description": "should images be pushed to a registry. If not specified, images are pushed only if the current Kubernetes context connects to a remote cluster." + }, + "useDockerCLI": { + "type": "boolean", + "description": "use docker command-line interface instead of Docker Engine APIs.", + "default": "false" + }, + "useBuildkit": { + "type": "boolean", + "description": "use BuildKit to build Docker images.", + "default": "false" + } + }, + "additionalProperties": false, + "description": "(beta) describes how to do a build on the local docker daemon and optionally push to a repository." + }, + "GoogleCloudBuild": { + "properties": { + "projectId": { + "type": "string", + "description": "ID of your Cloud Platform Project. If it is not provided, Skaffold will guess it from the image name. For example, given the artifact image name gcr.io/myproject/image, Skaffold will use the myproject GCP project." + }, + "diskSizeGb": { + "type": "number", + "description": "disk size of the VM that runs the build. See Cloud Build Reference." + }, + "machineType": { + "type": "string", + "description": "type of the VM that runs the build. See Cloud Build Reference." + }, + "timeout": { + "type": "string", + "description": "amount of time (in seconds) that this build should be allowed to run. See Cloud Build Reference." + }, + "dockerImage": { + "type": "string", + "description": "image that runs a Docker build. 
See Cloud Builders.",
+          "default": "gcr.io/cloud-builders/docker"
+        },
+        "mavenImage": {
+          "type": "string",
+          "description": "image that runs a Maven build. See Cloud Builders.",
+          "default": "gcr.io/cloud-builders/mvn"
+        },
+        "gradleImage": {
+          "type": "string",
+          "description": "image that runs a Gradle build. See Cloud Builders.",
+          "default": "gcr.io/cloud-builders/gradle"
+        }
+      },
+      "additionalProperties": false,
+      "description": "(beta) describes how to do a remote build on Google Cloud Build. Docker and Jib artifacts can be built on Cloud Build. The projectId needs to be provided and the currently logged in user should be given permissions to trigger new builds."
+    },
+    "LocalDir": {
+      "additionalProperties": false,
+      "description": "configures how Kaniko mounts sources directly via an emptyDir volume."
+    },
+    "KanikoBuildContext": {
+      "properties": {
+        "gcsBucket": {
+          "type": "string",
+          "description": "GCS bucket to which sources are uploaded by Skaffold. Kaniko will need access to that bucket to download the sources."
+        },
+        "localDir": {
+          "$ref": "#/definitions/LocalDir",
+          "description": "configures how Kaniko mounts sources directly via an emptyDir volume."
+        }
+      },
+      "additionalProperties": false,
+      "description": "contains the different fields available to specify a Kaniko build context."
+    },
+    "KanikoCache": {
+      "properties": {
+        "repo": {
+          "type": "string",
+          "description": "a remote repository to store cached layers. If none is specified, one will be inferred from the image name. See Kaniko Caching."
+        }
+      },
+      "additionalProperties": false,
+      "description": "configures Kaniko caching. If a cache is specified, Kaniko will use a remote cache which will speed up builds."
+    },
+    "KanikoBuild": {
+      "properties": {
+        "buildContext": {
+          "$ref": "#/definitions/KanikoBuildContext",
+          "description": "defines where Kaniko gets the sources from."
+        },
+        "cache": {
+          "$ref": "#/definitions/KanikoCache",
+          "description": "configures Kaniko caching. 
If a cache is specified, Kaniko will use a remote cache which will speed up builds." + }, + "flags": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional flags to be passed to Kaniko command line. See Kaniko Additional Flags.", + "default": "[]" + }, + "pullSecret": { + "type": "string", + "description": "path to the secret key file. See Kaniko Documentation." + }, + "pullSecretName": { + "type": "string", + "description": "name of the Kubernetes secret for pulling the files from the build context and pushing the final image.", + "default": "kaniko-secret" + }, + "namespace": { + "type": "string", + "description": "Kubernetes namespace. Defaults to current namespace in Kubernetes configuration." + }, + "timeout": { + "type": "string", + "description": "amount of time (in seconds) that this build is allowed to run. Defaults to 20 minutes (20m)." + }, + "image": { + "type": "string", + "description": "Docker image used by the Kaniko pod. Defaults to the latest released version of gcr.io/kaniko-project/executor." + }, + "dockerConfig": { + "$ref": "#/definitions/DockerConfig", + "description": "describes how to mount the local Docker configuration into the Kaniko pod." + } + }, + "additionalProperties": false, + "description": "(beta) describes how to do an on-cluster build using Kaniko." + }, + "DockerConfig": { + "properties": { + "path": { + "type": "string", + "description": "path to the docker config.json." + }, + "secretName": { + "type": "string", + "description": "Kubernetes secret that will hold the Docker configuration." + } + }, + "additionalProperties": false, + "description": "contains information about the docker config.json to mount." 
+ }, + "TestCase": { + "required": [ + "image" + ], + "properties": { + "image": { + "type": "string", + "description": "artifact on which to run those tests.", + "examples": [ + "gcr.io/k8s-skaffold/example" + ] + }, + "structureTests": { + "items": { + "type": "string" + }, + "type": "array", + "description": "the Container Structure Tests to run on that artifact.", + "default": "[]", + "examples": [ + "[\"./test/*\"]" + ] + } + }, + "additionalProperties": false, + "description": "a list of structure tests to run on images that Skaffold builds." + }, + "DeployConfig": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy", + "description": "(beta) uses the helm CLI to apply the charts to the cluster." + } + } + }, + { + "properties": { + "kubectl": { + "$ref": "#/definitions/KubectlDeploy", + "description": "(beta) uses a client side kubectl apply to deploy manifests. You'll need a kubectl CLI version installed that's compatible with your cluster." + } + } + }, + { + "properties": { + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy", + "description": "(beta) uses the kustomize CLI to \"patch\" a deployment for a target environment." + } + } + } + ], + "description": "contains all the configuration needed by the deploy steps." + }, + "DeployType": { + "properties": { + "helm": { + "$ref": "#/definitions/HelmDeploy", + "description": "(beta) uses the helm CLI to apply the charts to the cluster." + }, + "kubectl": { + "$ref": "#/definitions/KubectlDeploy", + "description": "(beta) uses a client side kubectl apply to deploy manifests. You'll need a kubectl CLI version installed that's compatible with your cluster." + }, + "kustomize": { + "$ref": "#/definitions/KustomizeDeploy", + "description": "(beta) uses the kustomize CLI to \"patch\" a deployment for a target environment." 
+ } + }, + "additionalProperties": false, + "description": "contains the specific implementation and parameters needed for the deploy step. Only one field should be populated." + }, + "KubectlDeploy": { + "properties": { + "manifests": { + "items": { + "type": "string" + }, + "type": "array", + "description": "the Kubernetes yaml or json manifests.", + "default": "[\"k8s/*.yaml\"]" + }, + "remoteManifests": { + "items": { + "type": "string" + }, + "type": "array", + "description": "Kubernetes manifests in remote clusters.", + "default": "[]" + }, + "flags": { + "$ref": "#/definitions/KubectlFlags", + "description": "additional flags passed to kubectl." + } + }, + "additionalProperties": false, + "description": "(beta) uses a client side kubectl apply to deploy manifests. You'll need a kubectl CLI version installed that's compatible with your cluster." + }, + "KubectlFlags": { + "properties": { + "global": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional flags passed on every command.", + "default": "[]" + }, + "apply": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional flags passed on creations (kubectl apply).", + "default": "[]" + }, + "delete": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional flags passed on deletions (kubectl delete).", + "default": "[]" + } + }, + "additionalProperties": false, + "description": "additional flags passed on the command line to kubectl either on every command (Global), on creations (Apply) or deletions (Delete)." + }, + "HelmDeploy": { + "required": [ + "releases" + ], + "properties": { + "releases": { + "items": { + "$ref": "#/definitions/HelmRelease" + }, + "type": "array", + "description": "a list of Helm releases." + } + }, + "additionalProperties": false, + "description": "(beta) uses the helm CLI to apply the charts to the cluster." 
+ }, + "KustomizeDeploy": { + "properties": { + "path": { + "type": "string", + "description": "path to Kustomization files.", + "default": "." + }, + "flags": { + "$ref": "#/definitions/KubectlFlags", + "description": "additional flags passed to kubectl." + } + }, + "additionalProperties": false, + "description": "(beta) uses the kustomize CLI to \"patch\" a deployment for a target environment." + }, + "HelmRelease": { + "required": [ + "name", + "chartPath" + ], + "properties": { + "name": { + "type": "string", + "description": "name of the Helm release." + }, + "chartPath": { + "type": "string", + "description": "path to the Helm chart." + }, + "valuesFiles": { + "items": { + "type": "string" + }, + "type": "array", + "description": "paths to the Helm values files.", + "default": "[]" + }, + "values": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "description": "key-value pairs supplementing the Helm values file.", + "default": "{}" + }, + "namespace": { + "type": "string", + "description": "Kubernetes namespace." + }, + "version": { + "type": "string", + "description": "version of the chart." + }, + "setValues": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "description": "key-value pairs. If present, Skaffold will send --set flag to Helm CLI and append all pairs after the flag.", + "default": "{}" + }, + "setValueTemplates": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "description": "key-value pairs. 
If present, Skaffold will try to parse the value part of each key-value pair using environment variables in the system, then send --set flag to Helm CLI and append all parsed pairs after the flag.", + "default": "{}" + }, + "wait": { + "type": "boolean", + "description": "if true, Skaffold will send --wait flag to Helm CLI.", + "default": "false" + }, + "recreatePods": { + "type": "boolean", + "description": "if true, Skaffold will send --recreate-pods flag to Helm CLI.", + "default": "false" + }, + "skipBuildDependencies": { + "type": "boolean", + "description": "should build dependencies be skipped.", + "default": "false" + }, + "overrides": { + "additionalProperties": {}, + "type": "object", + "description": "key-value pairs. If present, Skaffold will build a Helm values file that overrides the original and use it to call Helm CLI (--f flag).", + "default": "{}" + }, + "packaged": { + "$ref": "#/definitions/HelmPackaged", + "description": "parameters for packaging helm chart (helm package)." + }, + "imageStrategy": { + "$ref": "#/definitions/HelmImageStrategy", + "description": "adds image configurations to the Helm values file." + } + }, + "additionalProperties": false + }, + "HelmPackaged": { + "properties": { + "version": { + "type": "string", + "description": "sets the version on the chart to this semver version." + }, + "appVersion": { + "type": "string", + "description": "sets the appVersion on the chart to this version." + } + }, + "additionalProperties": false, + "description": "parameters for packaging helm chart (helm package)." + }, + "HelmImageStrategy": { + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig", + "description": "image configuration uses the syntax IMAGE-NAME=IMAGE-REPOSITORY:IMAGE-TAG." 
+ } + } + }, + { + "properties": { + "helm": { + "$ref": "#/definitions/HelmConventionConfig", + "description": "image configuration uses the syntax IMAGE-NAME.repository=IMAGE-REPOSITORY, IMAGE-NAME.tag=IMAGE-TAG." + } + } + } + ], + "description": "adds image configurations to the Helm values file." + }, + "HelmImageConfig": { + "properties": { + "fqn": { + "$ref": "#/definitions/HelmFQNConfig", + "description": "image configuration uses the syntax IMAGE-NAME=IMAGE-REPOSITORY:IMAGE-TAG." + }, + "helm": { + "$ref": "#/definitions/HelmConventionConfig", + "description": "image configuration uses the syntax IMAGE-NAME.repository=IMAGE-REPOSITORY, IMAGE-NAME.tag=IMAGE-TAG." + } + }, + "additionalProperties": false + }, + "HelmFQNConfig": { + "properties": { + "property": { + "type": "string", + "description": "defines the image config." + } + }, + "additionalProperties": false, + "description": "image config to use the FullyQualifiedImageName as param to set." + }, + "HelmConventionConfig": { + "additionalProperties": false, + "description": "image config in the syntax of image.repository and image.tag." + }, + "Artifact": { + "required": [ + "image" + ], + "properties": { + "image": { + "type": "string", + "description": "name of the image to be built.", + "examples": [ + "gcr.io/k8s-skaffold/example" + ] + }, + "context": { + "type": "string", + "description": "directory where the artifact's sources are to be found.", + "default": "." + }, + "sync": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "description": "(alpha) lists local files that can be synced to remote pods instead of triggering an image build when modified. This is a mapping of local files to sync to remote folders.", + "default": "{}", + "examples": [ + "{\"*.py\": \".\", \"css/**/*.css\": \"app/css\"}" + ] + }, + "plugin": { + "$ref": "#/definitions/BuilderPlugin", + "description": "plugin used to build this artifact." 
+ } + }, + "additionalProperties": false, + "anyOf": [ + { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact", + "description": "(beta) describes an artifact built from a Dockerfile." + } + } + }, + { + "properties": { + "bazel": { + "$ref": "#/definitions/BazelArtifact", + "description": "(beta) requires bazel CLI to be installed and the sources to contain Bazel configuration files." + } + } + }, + { + "properties": { + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact", + "description": "(alpha) builds images using the Jib plugin for Maven." + } + } + }, + { + "properties": { + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact", + "description": "(alpha) builds images using the Jib plugin for Gradle." + } + } + } + ], + "description": "items that need to be built, along with the context in which they should be built." + }, + "Profile": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "a unique profile name.", + "examples": [ + "profile-prod" + ] + }, + "build": { + "$ref": "#/definitions/BuildConfig", + "description": "replaces the main build configuration." + }, + "test": { + "items": { + "$ref": "#/definitions/TestCase" + }, + "type": "array", + "description": "replaces the main test configuration." + }, + "deploy": { + "$ref": "#/definitions/DeployConfig", + "description": "replaces the main deploy configuration." + }, + "patches": { + "items": { + "$ref": "#/definitions/JSONPatch" + }, + "type": "array", + "description": "a list of patches applied to the configuration. Patches use the JSON patch notation." + }, + "activation": { + "items": { + "$ref": "#/definitions/Activation" + }, + "type": "array", + "description": "criteria by which a profile can be auto-activated." + } + }, + "additionalProperties": false, + "description": "(beta) profiles are used to override any build, test or deploy configuration." 
+ }, + "JSONPatch": { + "required": [ + "path" + ], + "properties": { + "op": { + "type": "string", + "description": "operation carried by the patch: add, remove, replace, move, copy or test.", + "default": "replace" + }, + "path": { + "type": "string", + "description": "position in the yaml where the operation takes place. For example, this targets the dockerfile of the first artifact built.", + "examples": [ + "/build/artifacts/0/docker/dockerfile" + ] + }, + "from": { + "type": "string", + "description": "source position in the yaml, used for copy or move operations." + }, + "value": { + "type": "object", + "description": "value to apply. Can be any portion of yaml." + } + }, + "additionalProperties": false, + "description": "patch to be applied by a profile." + }, + "Activation": { + "properties": { + "env": { + "type": "string", + "description": "a key=value pair. The profile is auto-activated if an Environment Variable key has value value.", + "examples": [ + "ENV=production" + ] + }, + "kubeContext": { + "type": "string", + "description": "a Kubernetes context for which the profile is auto-activated.", + "examples": [ + "minikube" + ] + }, + "command": { + "type": "string", + "description": "a Skaffold command for which the profile is auto-activated.", + "examples": [ + "dev" + ] + } + }, + "additionalProperties": false, + "description": "criteria by which a profile is auto-activated." + }, + "ArtifactType": { + "properties": { + "docker": { + "$ref": "#/definitions/DockerArtifact", + "description": "(beta) describes an artifact built from a Dockerfile." + }, + "bazel": { + "$ref": "#/definitions/BazelArtifact", + "description": "(beta) requires bazel CLI to be installed and the sources to contain Bazel configuration files." + }, + "jibMaven": { + "$ref": "#/definitions/JibMavenArtifact", + "description": "(alpha) builds images using the Jib plugin for Maven." 
+ }, + "jibGradle": { + "$ref": "#/definitions/JibGradleArtifact", + "description": "(alpha) builds images using the Jib plugin for Gradle." + } + }, + "additionalProperties": false + }, + "DockerArtifact": { + "properties": { + "dockerfile": { + "type": "string", + "description": "locates the Dockerfile relative to workspace.", + "default": "Dockerfile" + }, + "target": { + "type": "string", + "description": "Dockerfile target name to build." + }, + "buildArgs": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "description": "arguments passed to the docker build.", + "default": "{}", + "examples": [ + "{\"key1\": \"value1\", \"key2\": \"value2\"}" + ] + }, + "cacheFrom": { + "items": { + "type": "string" + }, + "type": "array", + "description": "the Docker images to consider as cache sources.", + "default": "[]", + "examples": [ + "[\"golang:1.10.1-alpine3.7\", \"alpine:3.7\"]" + ] + } + }, + "additionalProperties": false, + "description": "(beta) describes an artifact built from a Dockerfile, usually using docker build." + }, + "BazelArtifact": { + "required": [ + "target" + ], + "properties": { + "target": { + "type": "string", + "description": "bazel build target to run.", + "examples": [ + "//:skaffold_example.tar" + ] + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional args to pass to bazel build.", + "default": "[]", + "examples": [ + "[\"-flag\", \"--otherflag\"]" + ] + } + }, + "additionalProperties": false, + "description": "(beta) describes an artifact built with Bazel." + }, + "JibMavenArtifact": { + "properties": { + "module": { + "type": "string", + "description": "selects which Maven module to build, for a multi module project." + }, + "profile": { + "type": "string", + "description": "selects which Maven profile to activate." 
+ }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional build flags passed to Maven.", + "default": "[]", + "examples": [ + "[\"-x\", \"-DskipTests\"]" + ] + } + }, + "additionalProperties": false, + "description": "(alpha) builds images using the Jib plugin for Maven." + }, + "JibGradleArtifact": { + "properties": { + "project": { + "type": "string", + "description": "selects which Gradle project to build." + }, + "args": { + "items": { + "type": "string" + }, + "type": "array", + "description": "additional build flags passed to Gradle.", + "default": "[]", + "examples": [ + "[\"--no-build-cache\"]" + ] + } + }, + "additionalProperties": false, + "description": "(alpha) builds images using the Jib plugin for Gradle." + } + } +} diff --git a/docs/content/en/search.md b/docs/content/en/search.md index e3690fd5a81..4cde3a93d32 100644 --- a/docs/content/en/search.md +++ b/docs/content/en/search.md @@ -1,6 +1,5 @@ --- title: Search Results layout: search - --- diff --git a/docs/layouts/shortcodes/readfile.html b/docs/layouts/shortcodes/readfile.html new file mode 100644 index 00000000000..905eca9feb5 --- /dev/null +++ b/docs/layouts/shortcodes/readfile.html @@ -0,0 +1 @@ +{{ (printf "```yaml\n%s\n```" ((.Get "file") | readFile)) | markdownify }} diff --git a/docs/layouts/shortcodes/schema.html b/docs/layouts/shortcodes/schema.html new file mode 100644 index 00000000000..83fb667b660 --- /dev/null +++ b/docs/layouts/shortcodes/schema.html @@ -0,0 +1,61 @@ +{{ $root := .Get "root" }} +{{ if not $root}} + {{ errorf "missing value for param 'root': %s" .Position }} +{{ end }} + +{{ $version := $.Page.Site.Params.skaffold_version }} +{{ $short_version := index (split $version "/") 1 }} +{{ $json_schema := printf "schemas/%s.json" $short_version }} +{{ if not (fileExists $json_schema)}} + {{ errorf "file not found %s at %s" $json_schema .Position }} +{{ end }} + +{{ $schema := (readFile $json_schema) | unmarshal }} +{{ if not 
$schema}} + {{ errorf "missing json schema: %s" .Position }} +{{ end }} + +{{ $definition := index $schema.definitions $root }} + +{{ $showDefaults := false }} +{{ range $definition.properties }} + {{ if .default }} + {{ $showDefaults = true }} + {{ end }} +{{ end }} + + + + + + + + + {{ if $showDefaults }}{{ end }} + + + + {{range $k, $v := $definition.properties}} + + + + {{ if $showDefaults }}{{ end }} + + {{end}} + +
          OptionDescriptionDefault
          {{$k}}{{if .required}}Required {{end}}{{.description | markdownify}}{{ .default }}
          + \ No newline at end of file diff --git a/docs/static/favicons/android-144x144.png b/docs/static/favicons/android-144x144.png index 9367565548c..880d75c1505 100644 Binary files a/docs/static/favicons/android-144x144.png and b/docs/static/favicons/android-144x144.png differ diff --git a/docs/static/favicons/android-192x192.png b/docs/static/favicons/android-192x192.png index 087315a23de..e0d272e92f5 100644 Binary files a/docs/static/favicons/android-192x192.png and b/docs/static/favicons/android-192x192.png differ diff --git a/docs/static/favicons/android-36x36.png b/docs/static/favicons/android-36x36.png index 37db51dcc58..f3f5e07b71b 100644 Binary files a/docs/static/favicons/android-36x36.png and b/docs/static/favicons/android-36x36.png differ diff --git a/docs/static/favicons/android-48x48.png b/docs/static/favicons/android-48x48.png index 4f0737be5e8..40038ddce58 100644 Binary files a/docs/static/favicons/android-48x48.png and b/docs/static/favicons/android-48x48.png differ diff --git a/docs/static/favicons/android-72x72.png b/docs/static/favicons/android-72x72.png index b34c5500eb5..9217cd9b4df 100644 Binary files a/docs/static/favicons/android-72x72.png and b/docs/static/favicons/android-72x72.png differ diff --git a/docs/static/favicons/android-96x96.png b/docs/static/favicons/android-96x96.png index 8367d218bb3..afbffc03c0c 100644 Binary files a/docs/static/favicons/android-96x96.png and b/docs/static/favicons/android-96x96.png differ diff --git a/docs/static/favicons/apple-touch-icon-180x180.png b/docs/static/favicons/apple-touch-icon-180x180.png index 5ba95e9ca0c..a3a4f9c9156 100644 Binary files a/docs/static/favicons/apple-touch-icon-180x180.png and b/docs/static/favicons/apple-touch-icon-180x180.png differ diff --git a/docs/static/favicons/favicon-16x16.png b/docs/static/favicons/favicon-16x16.png index 2e460bfd26d..16820e6eec8 100644 Binary files a/docs/static/favicons/favicon-16x16.png and b/docs/static/favicons/favicon-16x16.png differ 
diff --git a/docs/static/favicons/favicon-32x32.png b/docs/static/favicons/favicon-32x32.png index 02a2116b3ff..23907ffb02e 100644 Binary files a/docs/static/favicons/favicon-32x32.png and b/docs/static/favicons/favicon-32x32.png differ diff --git a/docs/static/favicons/pwa-192x192.png b/docs/static/favicons/pwa-192x192.png index 087315a23de..e0d272e92f5 100644 Binary files a/docs/static/favicons/pwa-192x192.png and b/docs/static/favicons/pwa-192x192.png differ diff --git a/docs/static/favicons/pwa-512x512.png b/docs/static/favicons/pwa-512x512.png index 1157b61fbd4..7e4d0249408 100644 Binary files a/docs/static/favicons/pwa-512x512.png and b/docs/static/favicons/pwa-512x512.png differ diff --git a/docs/static/favicons/tile150x150.png b/docs/static/favicons/tile150x150.png index 1c0d3600d79..eab4b25b5a1 100644 Binary files a/docs/static/favicons/tile150x150.png and b/docs/static/favicons/tile150x150.png differ diff --git a/docs/static/favicons/tile310x150.png b/docs/static/favicons/tile310x150.png index 4813abe06a1..3c5f6e29a12 100644 Binary files a/docs/static/favicons/tile310x150.png and b/docs/static/favicons/tile310x150.png differ diff --git a/docs/static/favicons/tile310x310.png b/docs/static/favicons/tile310x310.png index b14a5271414..fc067b12e92 100644 Binary files a/docs/static/favicons/tile310x310.png and b/docs/static/favicons/tile310x310.png differ diff --git a/docs/static/favicons/tile70x70.png b/docs/static/favicons/tile70x70.png index efcbfc61e60..8daa02927bd 100644 Binary files a/docs/static/favicons/tile70x70.png and b/docs/static/favicons/tile70x70.png differ diff --git a/docs/static/images/architecture.png b/docs/static/images/architecture.png index da18a7b9f31..3a9be650192 100644 Binary files a/docs/static/images/architecture.png and b/docs/static/images/architecture.png differ diff --git a/docs/static/images/workflow.png b/docs/static/images/workflow.png index eef3ac6325e..a81780084c2 100644 Binary files a/docs/static/images/workflow.png and 
b/docs/static/images/workflow.png differ diff --git a/docs/static/images/workflow_gcb.png b/docs/static/images/workflow_gcb.png index 5b7b66f5705..240cb832fe5 100644 Binary files a/docs/static/images/workflow_gcb.png and b/docs/static/images/workflow_gcb.png differ diff --git a/docs/static/images/workflow_local.png b/docs/static/images/workflow_local.png index 76c44671d18..f6eda290f6b 100644 Binary files a/docs/static/images/workflow_local.png and b/docs/static/images/workflow_local.png differ diff --git a/examples/annotated-skaffold.yaml b/examples/annotated-skaffold.yaml index 46252613de4..cba4f108bee 100644 --- a/examples/annotated-skaffold.yaml +++ b/examples/annotated-skaffold.yaml @@ -1,5 +1,7 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config +# The build section has all the information needed to build images. +# It is a required section. build: # tagPolicy (beta) determines how Skaffold is going to tag your images. # We provide a few strategies here, although you most likely won't need to care! @@ -17,9 +19,6 @@ build: # The template is compiled and executed against the current environment, # with those variables injected: # IMAGE_NAME | Name of the image being built, as supplied in the artifacts section. - # DIGEST | Digest of the newly built image. For eg. `sha256:27ffc7f352665cc50ae3cbcc4b2725e36062f1b38c611b6f95d6df9a7510de23`. - # DIGEST_ALGO | Algorithm used by the digest: For eg. `sha256`. - # DIGEST_HEX | Digest of the newly built image. For eg. `27ffc7f352665cc50ae3cbcc4b2725e36062f1b38c611b6f95d6df9a7510de23`. # Example # envTemplate: # template: "{{.RELEASE}}-{{.IMAGE_NAME}}" @@ -56,8 +55,8 @@ build: key2: "value2" # Images to consider as cache sources cacheFrom: - - image1 - - image2 + - golang:1.10.1-alpine3.7 + - alpine:3.7 # Dockerfile target name to build. 
# target: stageName @@ -74,10 +73,16 @@ build: # jibMaven: # module: modulename # selects which maven module to build, for a multimodule project # profile: profilename # selects which maven profile to activate + # args: # additional arguments to pass to maven + # - "arg1" + # - "arg2" # jibGradle builds containers using the Jib plugin for Gradle. # jibGradle: # project: projectname # selects which gradle project to build + # args: # additional arguments to pass to gradle + # - "arg1" + # - "arg2" # This next section is where you'll put your specific builder configuration. # Valid builders are `local` (beta), `googleCloudBuild` (beta) and `kaniko` (beta). @@ -100,7 +105,7 @@ build: # useDockerCLI: false # useBuildkit: false - # Docker artifacts can be built on Google Cloud Build. The projectId then needs + # Docker and Jib artifacts can be built on Google Cloud Build. The projectId then needs # to be provided and the currently logged user should be given permissions to trigger # new builds on Cloud Build. # If the projectId is not provided, Skaffold will try to guess it from the image name. @@ -113,20 +118,22 @@ build: # machineType: "N1_HIGHCPU_8"|"N1_HIGHCPU_32" # timeout: 10000s # dockerImage: gcr.io/cloud-builders/docker + # mavenImage: gcr.io/cloud-builders/mvn + # gradleImage: gcr.io/cloud-builders/gradle # Docker artifacts can be built on a Kubernetes cluster with Kaniko. # Exactly one buildContext must be specified to use kaniko - # If localDir is specified, skaffold will mount sources directly via a emptyDir volume + # If localDir is specified, skaffold will mount sources directly via a emptyDir volume # If gcsBucket is specified, skaffold will send sources to the GCS bucket provided # Kaniko also needs access to a service account to push the final image. # See https://github.com/GoogleContainerTools/kaniko#running-kaniko-in-a-kubernetes-cluster # If cache is specified, kaniko will use a remote cache which will speed up builds. 
# A cache repo can be specified to store cached layers, otherwise one will be inferred - # from the image name. See https://github.com/GoogleContainerTools/kaniko#caching + # from the image name. See https://github.com/GoogleContainerTools/kaniko#caching # # Additional flags can be specified as a list. To see all additional flags, visit: # https://github.com/GoogleContainerTools/kaniko#additional-flags - # + # # kaniko: # buildContext: # gcsBucket: k8s-skaffold @@ -140,9 +147,20 @@ build: # namespace: default # timeout: 20m # image: defaults to the latest released version of `gcr.io/kaniko-project/executor` - -# The deploy section has all the information needed to deploy. Along with build: -# it is a required section. + # dockerConfig: + # path: path to the docker config.json + # secretName: docker-cfg + +# The test section has all the information needed to test images. +test: + # For each image listed here, Skaffold will run a series of structure tests using + # the [Container Structure Tests](https://github.com/GoogleContainerTools/container-structure-test) + # project + # - image: gcr.io/k8s-skaffold/skaffold-example + # structureTests: + # - ./test/* + +# The deploy section has all the information needed to deploy. deploy: # The type of the deployment method can be `kubectl` (beta), `helm` (beta) or `kustomize` (beta). @@ -189,6 +207,8 @@ deploy: # namespace: skaffold # version: "" # recreatePods: false + # # set to true if you need to skip "helm dep build". Necessary for use with remote chart. + # skipBuildDependencies: false # # # setValues get appended to the helm deploy with --set. 
# setValues: @@ -215,3 +235,18 @@ profiles: build: googleCloudBuild: projectId: k8s-skaffold + - name: other + # profiles can also patch some values using standard JSON patch notation + patches: + # This profile will replace the `dockerfile` value of the first artifact by `Dockerfile.DEV` + - path: /build/artifacts/0/docker/dockerfile + value: Dockerfile.DEV + # profiles can be auto-activated by external factors, like environment variables + # kubeContext value or depending on which skaffold command is run. + activation: + # Auto-activate this profile if the DEBUG env variable is set to `true` AND the kubeContext is `ctx1` + # - env: DEBUG=true + # kubeContext: ctx1 + # Auto-activate if the skaffold command is `skaffold run` OR if the kubeContext is NOT `ctx2` + # - command: run + # - kubeContext: "!ctx2" diff --git a/examples/bazel/WORKSPACE b/examples/bazel/WORKSPACE index d095946df88..6f90ba50199 100644 --- a/examples/bazel/WORKSPACE +++ b/examples/bazel/WORKSPACE @@ -1,17 +1,18 @@ workspace(name = "skaffold") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") -git_repository( +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( name = "io_bazel_rules_docker", - remote = "https://github.com/bazelbuild/rules_docker.git", - tag = "v0.5.1", + strip_prefix = "rules_docker-0.7.0", + urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.7.0.tar.gz"], + sha256 = "aed1c249d4ec8f703edddf35cbe9dfaca0b5f5ea6e4cd9e83e99f3b0d1136c3d", ) -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "io_bazel_rules_go", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.2/rules_go-0.16.2.tar.gz"], - sha256 = "f87fa87475ea107b3c69196f39c82b7bbf58fe27c62a338684c20ca17d1d8613", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"], + sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705", ) 
load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") diff --git a/examples/bazel/skaffold.yaml b/examples/bazel/skaffold.yaml index 5c3d741c1d8..94fc6594c7d 100644 --- a/examples/bazel/skaffold.yaml +++ b/examples/bazel/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/getting-started/skaffold.yaml b/examples/getting-started/skaffold.yaml index 24fed4bbcb0..2cb37c3b235 100644 --- a/examples/getting-started/skaffold.yaml +++ b/examples/getting-started/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/helm-deployment/skaffold.yaml b/examples/helm-deployment/skaffold.yaml index 911148134cb..08738411bc2 100644 --- a/examples/helm-deployment/skaffold.yaml +++ b/examples/helm-deployment/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: tagPolicy: diff --git a/examples/hot-reload/skaffold.yaml b/examples/hot-reload/skaffold.yaml index 91936ffff4a..f0f4041a0ea 100644 --- a/examples/hot-reload/skaffold.yaml +++ b/examples/hot-reload/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/jib-multimodule/.gitignore b/examples/jib-multimodule/.gitignore new file mode 100644 index 00000000000..9d26fe4575a --- /dev/null +++ b/examples/jib-multimodule/.gitignore @@ -0,0 +1,5 @@ +target +.classpath +.project +.settings +.vscode diff --git a/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java b/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100755 index 00000000000..fa4f7b499fd --- /dev/null +++ b/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,110 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license 
agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +*/ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if(mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if(mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: : " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if(!outputFile.getParentFile().exists()) { + if(!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar b/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar new file mode 100755 index 
00000000000..01e67997377 Binary files /dev/null and b/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar differ diff --git a/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties b/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties new file mode 100755 index 00000000000..cd0d451ccd6 --- /dev/null +++ b/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip diff --git a/examples/jib-multimodule/README.adoc b/examples/jib-multimodule/README.adoc new file mode 100644 index 00000000000..f1ea1431d23 --- /dev/null +++ b/examples/jib-multimodule/README.adoc @@ -0,0 +1,51 @@ +=== Example: Jib Multi-Module +:icons: font + +Jib is one of the supported builders in Skaffold. +[Jib](https://github.com/GoogleContainerTools/jib) builds Docker and OCI images +for your Java applications and is available as plugins for Maven and Gradle. + +Sometimes a project is configured to have multiple modules to create several +container images. Skaffold can work with Jib to build these containers as +required. + +The way you configure it in `skaffold.yaml` is the following build stanza: + +[source,yaml] +---- +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-jib-1 + # context is the root of the multi-module project + context: . + jibMaven: + # module is either the relative location within the project (i.e., + # relative to `context`) or :artifactId or groupId:artifactId + module: moduleLocation + - image: gcr.io/k8s-skaffold/skaffold-jib-2 + context: . + jibMaven: + module: :artifactId +---- + +There are a few caveats: + + - The `jib-maven-plugin` must be either be configured referenced in the + root module of the project. This is easily done through a `pluginManagement` + block. + + - The artifact modules must have a `jib:xxx` goal bound to the `package` phase. 
+ +ifndef::env-github[] +==== link:{github-repo-tree}/examples/jib[Example files icon:github[]] + +[source,yaml, indent=3, title=skaffold.yaml] +---- +include::skaffold.yaml[] +---- + +[source,xml, indent=3, title=pom.xml, syntax=xml] +---- +include::pom.xml[] +---- +endif::[] diff --git a/examples/jib-multimodule/k8s/web.yaml b/examples/jib-multimodule/k8s/web.yaml new file mode 100644 index 00000000000..7e3145c0158 --- /dev/null +++ b/examples/jib-multimodule/k8s/web.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web1 +spec: + selector: + matchLabels: + app: web1 + template: + metadata: + labels: + app: web1 + spec: + containers: + - name: web1 + image: gcr.io/k8s-skaffold/skaffold-jib-1 + ports: + - containerPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web2 +spec: + selector: + matchLabels: + app: web2 + template: + metadata: + labels: + app: web2 + spec: + containers: + - name: web2 + image: gcr.io/k8s-skaffold/skaffold-jib-2 + ports: + - containerPort: 8080 diff --git a/examples/jib-multimodule/mvnw b/examples/jib-multimodule/mvnw new file mode 100755 index 00000000000..5551fde8e7d --- /dev/null +++ b/examples/jib-multimodule/mvnw @@ -0,0 +1,286 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + wget "$jarUrl" -O "$wrapperJarPath" + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + curl -o "$wrapperJarPath" "$jarUrl" + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." 
+ fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/examples/jib-multimodule/mvnw.cmd b/examples/jib-multimodule/mvnw.cmd new file mode 100755 index 00000000000..48363fa60b9 --- /dev/null +++ b/examples/jib-multimodule/mvnw.cmd @@ -0,0 +1,161 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" +FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + echo Found %WRAPPER_JAR% +) else ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %DOWNLOAD_URL% + powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" + echo Finished downloading %WRAPPER_JAR% +) +@REM End of extension + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/examples/jib-multimodule/pom.xml b/examples/jib-multimodule/pom.xml new file mode 100644 index 00000000000..aa28047971b --- /dev/null +++ b/examples/jib-multimodule/pom.xml @@ -0,0 +1,46 @@ + + + 4.0.0 + + org.skaffold + parent + 0.1.0 + Multi-Module Example with Skaffold and Jib + pom + + + org.springframework.boot + spring-boot-starter-parent + 2.0.5.RELEASE + + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter-web + + + + + project1 + project2 + + + + + + + com.google.cloud.tools + jib-maven-plugin + 1.0.0 + + + + + diff --git a/examples/jib-multimodule/project1/pom.xml b/examples/jib-multimodule/project1/pom.xml new file mode 100644 index 00000000000..d78cdd80d0a --- /dev/null +++ b/examples/jib-multimodule/project1/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + + + org.skaffold + parent + 0.1.0 + + + skaffold-project-1 + 0.1.0 + Project 1: Spring Boot with Skaffold and Jib + + + 1.8 + + + + hello1 + + + org.springframework.boot + spring-boot-maven-plugin + + + 
com.google.cloud.tools + jib-maven-plugin + + + gcr.io/k8s-skaffold/project1 + + + + -Djava.security.egd=file:/dev/./urandom + + -XX:+UnlockExperimentalVMOptions + -XX:+UseCGroupMemoryLimitForHeap + + + + + + + default-package + package + + build + + + + + + + diff --git a/examples/jib-multimodule/project1/src/main/java/hello/Application.java b/examples/jib-multimodule/project1/src/main/java/hello/Application.java new file mode 100644 index 00000000000..5d77995503e --- /dev/null +++ b/examples/jib-multimodule/project1/src/main/java/hello/Application.java @@ -0,0 +1,11 @@ +package hello; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application { + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} \ No newline at end of file diff --git a/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java b/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java new file mode 100644 index 00000000000..4121e5c6d11 --- /dev/null +++ b/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java @@ -0,0 +1,12 @@ +package hello; + +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.bind.annotation.RequestMapping; + +@RestController +public class HelloController { + @RequestMapping("/") + public String index() { + return "Hello from project1!"; + } +} diff --git a/examples/jib-multimodule/project2/pom.xml b/examples/jib-multimodule/project2/pom.xml new file mode 100644 index 00000000000..d08fba19b27 --- /dev/null +++ b/examples/jib-multimodule/project2/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + + + org.skaffold + parent + 0.1.0 + + + skaffold-project-2 + 0.1.0 + Project 2: Spring Boot with Skaffold and Jib + + + 1.8 + + + + hello2 + + + org.springframework.boot + spring-boot-maven-plugin + + + com.google.cloud.tools + 
jib-maven-plugin + + + gcr.io/k8s-skaffold/project2 + + + + -Djava.security.egd=file:/dev/./urandom + + -XX:+UnlockExperimentalVMOptions + -XX:+UseCGroupMemoryLimitForHeap + + + + + + + default-package + package + + build + + + + + + + diff --git a/examples/jib-multimodule/project2/src/main/java/hello/Application.java b/examples/jib-multimodule/project2/src/main/java/hello/Application.java new file mode 100644 index 00000000000..5d77995503e --- /dev/null +++ b/examples/jib-multimodule/project2/src/main/java/hello/Application.java @@ -0,0 +1,11 @@ +package hello; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application { + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} \ No newline at end of file diff --git a/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java b/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java new file mode 100644 index 00000000000..0417f2cd760 --- /dev/null +++ b/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java @@ -0,0 +1,12 @@ +package hello; + +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.bind.annotation.RequestMapping; + +@RestController +public class HelloController { + @RequestMapping("/") + public String index() { + return "Hello from project2!"; + } +} diff --git a/examples/jib-multimodule/skaffold.yaml b/examples/jib-multimodule/skaffold.yaml new file mode 100644 index 00000000000..5603852e1b1 --- /dev/null +++ b/examples/jib-multimodule/skaffold.yaml @@ -0,0 +1,18 @@ +apiVersion: skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-jib-1 + jibMaven: + # module can be the relative directory + module: project1 + - image: gcr.io/k8s-skaffold/skaffold-jib-2 + jibMaven: + # module can be [groupId]:artifactId + 
module: :skaffold-project-2 + +# optional profile to run the jib build on Google Cloud Build +profiles: + - name: gcb + build: + googleCloudBuild: {} diff --git a/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java b/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100755 index 00000000000..fa4f7b499fd --- /dev/null +++ b/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,110 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +*/ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. 
+ */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. + File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if(mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if(mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... 
+ } + } + } + System.out.println("- Downloading from: : " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if(!outputFile.getParentFile().exists()) { + if(!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/examples/jib/.mvn/wrapper/maven-wrapper.jar b/examples/jib/.mvn/wrapper/maven-wrapper.jar new file mode 100755 index 00000000000..01e67997377 Binary files /dev/null and b/examples/jib/.mvn/wrapper/maven-wrapper.jar differ diff --git a/examples/jib/.mvn/wrapper/maven-wrapper.properties b/examples/jib/.mvn/wrapper/maven-wrapper.properties new file mode 100755 index 00000000000..cd0d451ccd6 --- /dev/null +++ b/examples/jib/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip diff --git a/examples/jib/k8s/web.yaml b/examples/jib/k8s/web.yaml index 34c04f9b2b8..e0382bd2ac9 100644 --- a/examples/jib/k8s/web.yaml +++ b/examples/jib/k8s/web.yaml @@ -26,4 +26,5 @@ spec: containers: - name: web image: gcr.io/k8s-skaffold/skaffold-jib - + ports: + - containerPort: 8080 diff --git a/examples/jib/mvnw b/examples/jib/mvnw new file mode 100755 
index 00000000000..5551fde8e7d --- /dev/null +++ b/examples/jib/mvnw @@ -0,0 +1,286 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + wget "$jarUrl" -O "$wrapperJarPath" + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + curl -o "$wrapperJarPath" "$jarUrl" + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." 
+ fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/examples/jib/mvnw.cmd b/examples/jib/mvnw.cmd new file mode 100755 index 00000000000..48363fa60b9 --- /dev/null +++ b/examples/jib/mvnw.cmd @@ -0,0 +1,161 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" +FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + echo Found %WRAPPER_JAR% +) else ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %DOWNLOAD_URL% + powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" + echo Finished downloading %WRAPPER_JAR% +) +@REM End of extension + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/examples/jib/pom.xml b/examples/jib/pom.xml index 83b6ccf58cf..2d82fb02864 100644 --- a/examples/jib/pom.xml +++ b/examples/jib/pom.xml @@ -34,11 +34,12 @@ com.google.cloud.tools jib-maven-plugin - 0.10.1 + 1.0.0 -Djava.security.egd=file:/dev/./urandom + -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap diff --git a/examples/jib/skaffold.yaml b/examples/jib/skaffold.yaml index c2ca96aa072..371e8e5bc0d 100644 --- a/examples/jib/skaffold.yaml +++ b/examples/jib/skaffold.yaml @@ -1,6 +1,12 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: - image: gcr.io/k8s-skaffold/skaffold-jib jibMaven: {} + +# optional profile to run the jib build on Google Cloud Build +profiles: + - name: gcb + build: + googleCloudBuild: {} diff --git a/examples/kaniko-local/skaffold.yaml b/examples/kaniko-local/skaffold.yaml index f3d5b6f123b..df5be83e7f8 100644 --- a/examples/kaniko-local/skaffold.yaml +++ b/examples/kaniko-local/skaffold.yaml @@ -1,4 +1,4 
@@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/kaniko/skaffold.yaml b/examples/kaniko/skaffold.yaml index 827c2d894ef..b62bc62dab9 100644 --- a/examples/kaniko/skaffold.yaml +++ b/examples/kaniko/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/kustomize/skaffold.yaml b/examples/kustomize/skaffold.yaml index 48bc3a8739a..f6080f60561 100644 --- a/examples/kustomize/skaffold.yaml +++ b/examples/kustomize/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config deploy: kustomize: {} diff --git a/examples/microservices/skaffold.yaml b/examples/microservices/skaffold.yaml index 1f40696dc67..a8b6a457407 100644 --- a/examples/microservices/skaffold.yaml +++ b/examples/microservices/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/nodejs/skaffold.yaml b/examples/nodejs/skaffold.yaml index 94d1b433250..fd6616f52e8 100644 --- a/examples/nodejs/skaffold.yaml +++ b/examples/nodejs/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/structure-tests/skaffold.yaml b/examples/structure-tests/skaffold.yaml index 028409de9fe..f69949f65a8 100644 --- a/examples/structure-tests/skaffold.yaml +++ b/examples/structure-tests/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/examples/tagging-with-environment-variables/skaffold.yaml b/examples/tagging-with-environment-variables/skaffold.yaml index 3437f0db666..f7ba0802d6d 100644 --- a/examples/tagging-with-environment-variables/skaffold.yaml +++ b/examples/tagging-with-environment-variables/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: 
skaffold/v1beta5 kind: Config build: artifacts: diff --git a/hack/boilerplate.sh b/hack/boilerplate.sh index c46232c68b2..19b9dfa698a 100755 --- a/hack/boilerplate.sh +++ b/hack/boilerplate.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index a740a06d320..dedd7b44be1 100644 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -143,7 +143,7 @@ def get_regexs(): # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing regexs["year"] = re.compile( 'YEAR' ) # dates can be 2018, company holder names can be anything - regexs["date"] = re.compile( '(2018)' ) + regexs["date"] = re.compile( '(2019)' ) # strip // +build \n\n build constraints regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts diff --git a/hack/check-docs.sh b/hack/check-docs.sh index 71821f4a512..a5543032df8 100755 --- a/hack/check-docs.sh +++ b/hack/check-docs.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,14 +17,14 @@ cp docs/content/en/docs/references/cli/index_header docs/content/en/docs/references/cli/_index.md go run cmd/skaffold/man/man.go >> docs/content/en/docs/references/cli/_index.md -readonly CLI_CHANGES=`git status -s | grep "docs/" | grep -x vendor | wc -l` +readonly CLI_CHANGES=`git status -s | grep "docs/" | wc -l` if [ $CLI_CHANGES -gt 0 ]; then echo "You have skaffold command changes but haven't generated the CLI reference docs. Please run hack/check-docs.sh and commit the results!" exit 1 fi -readonly DOCS_CHANGES=`git diff --name-status master | grep "docs/" | grep -x vendor | wc -l` +readonly DOCS_CHANGES=`git diff --name-status master | grep "docs/" | wc -l` if [ $DOCS_CHANGES -gt 0 ]; then echo "There are $DOCS_CHANGES changes in docs, testing site generation..." diff --git a/hack/dep.sh b/hack/dep.sh index de9c4739a88..3d194763261 100755 --- a/hack/dep.sh +++ b/hack/dep.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/hack/gofmt.sh b/hack/gofmt.sh index 2200e6d2ce8..649cb8171d2 100755 --- a/hack/gofmt.sh +++ b/hack/gofmt.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/hack/kokoro/presubmit.sh b/hack/kokoro/presubmit.sh index 8a406528ca0..b241a5f5460 100644 --- a/hack/kokoro/presubmit.sh +++ b/hack/kokoro/presubmit.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/hack/linter.sh b/hack/linter.sh index d8679be8ef6..7276c839601 100755 --- a/hack/linter.sh +++ b/hack/linter.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/deploy/kubectl/warnings.go b/hack/new_config_version/version.go similarity index 63% rename from pkg/skaffold/deploy/kubectl/warnings.go rename to hack/new_config_version/version.go index 40738379ec3..86d4c6c7a9a 100644 --- a/pkg/skaffold/deploy/kubectl/warnings.go +++ b/hack/new_config_version/version.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package main -import "github.com/sirupsen/logrus" +import ( + "fmt" + "strings" -// Warner shows warnings. -type Warner interface { - Warnf(format string, args ...interface{}) -} - -type logrusWarner struct{} + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" +) -func (l *logrusWarner) Warnf(format string, args ...interface{}) { - logrus.Warnf(format, args...) +func main() { + fmt.Println(strings.TrimPrefix(latest.Version, "skaffold/")) } diff --git a/hack/new_version.sh b/hack/new_version.sh new file mode 100755 index 00000000000..2235942bb91 --- /dev/null +++ b/hack/new_version.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Skaffold Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CURRENT_VERSION=`go run hack/new_config_version/version.go` +echo "Current config version: $CURRENT_VERSION" + +echo "Please enter new config version:" +read NEW_VERSION + +echo "Please enter previous config version:" +read PREV_VERSION + +echo "Generating changes for new config version $NEW_VERSION..." + +sed -i docs/config.toml -e "s;$CURRENT_VERSION;$NEW_VERSION;g" + +cp -R pkg/skaffold/schema/latest pkg/skaffold/schema/${CURRENT_VERSION} + +sed -i pkg/skaffold/schema/${CURRENT_VERSION}/*.go -e "s;latest;$CURRENT_VERSION;g" + +sed pkg/skaffold/schema/${PREV_VERSION}/upgrade_test.go -e "s;$CURRENT_VERSION;$NEW_VERSION;g" > pkg/skaffold/schema/${CURRENT_VERSION}/upgrade_test.go +sed -i pkg/skaffold/schema/${CURRENT_VERSION}/upgrade_test.go -e "s;$PREV_VERSION;$CURRENT_VERSION;g" + +sed pkg/skaffold/schema/${PREV_VERSION}/upgrade.go -e "s;$CURRENT_VERSION;$NEW_VERSION;g" > pkg/skaffold/schema/${CURRENT_VERSION}/upgrade.go +sed -i pkg/skaffold/schema/${CURRENT_VERSION}/upgrade.go -e "s;$PREV_VERSION;$CURRENT_VERSION;g" + +sed -i pkg/skaffold/schema/${PREV_VERSION}/upgrade*.go -e "s;latest;$CURRENT_VERSION;g" +goimports -w pkg/skaffold/schema/${PREV_VERSION}/upgrade*.go + +sed -i pkg/skaffold/schema/latest/config.go -e "s;$CURRENT_VERSION;$NEW_VERSION;g" + +find integration -name "skaffold.yaml" -print0 | xargs -0 -I xx sed -i xx -e "s;$CURRENT_VERSION;$NEW_VERSION;g" + +sed pkg/skaffold/schema/versions.go -i -e "s;\(.*\)$PREV_VERSION.Version\(.*\)$PREV_VERSION\(.*\);&\n\1$CURRENT_VERSION.Version\2$CURRENT_VERSION\3;g" +sed pkg/skaffold/schema/versions.go 
-i -e "s;\(.*\)/$PREV_VERSION\(.*\);&\n\1/$CURRENT_VERSION\2;g" + +make generate-schemas + +git --no-pager diff --minimal + +make test + +echo +echo "---------------------------------------" +echo +echo "Files generated for $NEW_VERSION. Don't worry about the hack/check-docs change failure, it is expected!" +echo "Other tests should have passed. For the docs change, commit the results and rerun 'make test'." +echo "Please double check manually the generated files as well: the upgrade functionality, and all the examples:" +echo +git status -s +echo +echo "---------------------------------------" \ No newline at end of file diff --git a/hack/release.sh b/hack/release.sh index a649460ee02..8dac91934fb 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -1,4 +1,4 @@ -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,5 +22,8 @@ go run ${DIR}/release_notes/listpullreqs.go # sync files from integration examples to examples/ rm -rf ${EXAMPLES_DIR} && rm -rf ${INTEGRATION_EXAMPLES_DIR}/bazel/bazel-* && cp -r ${INTEGRATION_EXAMPLES_DIR} ${EXAMPLES_DIR} && rm -rf ${EXAMPLES_DIR}/test-* -echo "Huge thank you for this release towards our contributors: " +echo +echo "Huge thanks goes out to all of our contributors for this release: " +echo git log "$(git describe --abbrev=0)".. --format="%aN" --reverse | sort | uniq | awk '{printf "- %s\n", $0 }' +echo \ No newline at end of file diff --git a/hack/release_notes/listpullreqs.go b/hack/release_notes/listpullreqs.go index efadfed11c2..51af142c157 100644 --- a/hack/release_notes/listpullreqs.go +++ b/hack/release_notes/listpullreqs.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/hack/schemas/main.go b/hack/schemas/main.go new file mode 100644 index 00000000000..bf85fececde --- /dev/null +++ b/hack/schemas/main.go @@ -0,0 +1,337 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "os" + "reflect" + "regexp" + "strings" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema" + "github.com/pkg/errors" + blackfriday "gopkg.in/russross/blackfriday.v2" +) + +const defPrefix = "#/definitions/" + +type Schema struct { + *Definition + Definitions *Definitions `json:"definitions,omitempty"` +} + +type Definitions struct { + keys []string + values map[string]*Definition +} + +func (d *Definitions) Add(key string, value *Definition) { + d.keys = append(d.keys, key) + if d.values == nil { + d.values = make(map[string]*Definition) + } + d.values[key] = value +} + +func (d *Definitions) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString("{") + for i, k := range d.keys { + if i != 0 { + buf.WriteString(",") + } + // marshal key + key, err := json.Marshal(k) + if err != nil { + return nil, err + } + buf.Write(key) + buf.WriteString(":") + + // marshal value + var val bytes.Buffer + encoder := json.NewEncoder(&val) + encoder.SetEscapeHTML(false) + if err := encoder.Encode(d.values[k]); err != nil { + return nil, err + } + buf.Write(val.Bytes()) + } + buf.WriteString("}") + + 
return buf.Bytes(), nil +} + +type Definition struct { + Ref string `json:"$ref,omitempty"` + Items *Definition `json:"items,omitempty"` + Required []string `json:"required,omitempty"` + Properties *Definitions `json:"properties,omitempty"` + AdditionalProperties interface{} `json:"additionalProperties,omitempty"` + Type string `json:"type,omitempty"` + AnyOf []*Definition `json:"anyOf,omitempty"` + Description string `json:"description,omitempty"` + Default interface{} `json:"default,omitempty"` + Examples []string `json:"examples,omitempty"` +} + +func main() { + if _, err := generateSchemas(".", false); err != nil { + panic(err) + } +} + +func generateSchemas(root string, dryRun bool) (bool, error) { + same := true + + for i, version := range schema.SchemaVersions { + apiVersion := strings.TrimPrefix(version.APIVersion, "skaffold/") + folder := apiVersion + if i == len(schema.SchemaVersions)-1 { + folder = "latest" + } + + input := fmt.Sprintf("%s/pkg/skaffold/schema/%s/config.go", root, folder) + buf, err := generateSchema(input) + if err != nil { + return false, errors.Wrapf(err, "unable to generate schema for version %s", version.APIVersion) + } + + output := fmt.Sprintf("%s/docs/content/en/schemas/%s.json", root, apiVersion) + var current []byte + + if _, err := os.Stat(output); err == nil { + var err error + current, err = ioutil.ReadFile(output) + if err != nil { + return false, errors.Wrapf(err, "unable to read existing schema for version %s", version.APIVersion) + } + } else if !os.IsNotExist(err) { + return false, errors.Wrapf(err, "unable to check that file exists %s", output) + } + + if string(current) != string(buf) { + same = false + } + + if !dryRun { + ioutil.WriteFile(output, buf, os.ModePerm) + } + } + + return same, nil +} + +func yamlFieldName(field *ast.Field) string { + tag := strings.Replace(field.Tag.Value, "`", "", -1) + tags := reflect.StructTag(tag) + yamlTag := tags.Get("yaml") + + return strings.Split(yamlTag, ",")[0] +} + +func 
setTypeOrRef(def *Definition, typeName string) { + switch typeName { + case "string": + def.Type = typeName + case "bool": + def.Type = "boolean" + case "int", "int64": + def.Type = "number" + default: + def.Ref = defPrefix + typeName + } +} + +func newDefinition(name string, t ast.Expr, comment string) *Definition { + def := &Definition{} + + switch tt := t.(type) { + case *ast.Ident: + typeName := tt.Name + setTypeOrRef(def, typeName) + + switch typeName { + case "string": + // def.Default = "\"\"" + case "bool": + def.Default = "false" + case "int", "int64": + // def.Default = "0" + } + + case *ast.StarExpr: + if ident, ok := tt.X.(*ast.Ident); ok { + typeName := ident.Name + setTypeOrRef(def, typeName) + } else if _, ok := tt.X.(*ast.SelectorExpr); ok { + def.Type = "object" + } + + case *ast.ArrayType: + def.Type = "array" + def.Items = newDefinition("", tt.Elt, "") + if def.Items.Ref == "" { + def.Default = "[]" + } + + case *ast.MapType: + def.Type = "object" + def.Default = "{}" + def.AdditionalProperties = newDefinition("", tt.Value, "") + + case *ast.StructType: + for _, field := range tt.Fields.List { + yamlName := yamlFieldName(field) + + if strings.Contains(field.Tag.Value, "inline") { + def.AnyOf = append(def.AnyOf, &Definition{ + Ref: defPrefix + field.Type.(*ast.Ident).Name, + }) + continue + } + + if yamlName == "" { + continue + } + + if strings.Contains(field.Tag.Value, "required") { + def.Required = append(def.Required, yamlName) + } + + if def.Properties == nil { + def.Properties = &Definitions{} + } + + def.Properties.Add(yamlName, newDefinition(field.Names[0].Name, field.Type, field.Doc.Text())) + def.AdditionalProperties = false + } + } + + description := strings.TrimSpace(strings.Replace(comment, "\n", " ", -1)) + + // Extract default value + if m := regexp.MustCompile("(.*)Defaults to `(.*)`").FindStringSubmatch(description); m != nil { + description = strings.TrimSpace(m[1]) + def.Default = m[2] + } + + // Extract example + if m := 
regexp.MustCompile("(.*)For example: `(.*)`").FindStringSubmatch(description); m != nil { + description = strings.TrimSpace(m[1]) + def.Examples = []string{m[2]} + } + + // Remove type prefix + description = strings.TrimPrefix(description, name+" is the ") + description = strings.TrimPrefix(description, name+" is ") + description = strings.TrimPrefix(description, name+" are the ") + description = strings.TrimPrefix(description, name+" are ") + description = strings.TrimPrefix(description, name+" lists ") + description = strings.TrimPrefix(description, name+" ") + + // Convert to HTML + html := string(blackfriday.Run([]byte(description), blackfriday.WithNoExtensions())) + html = strings.Replace(html, "

          ", "", -1) + html = strings.Replace(html, "

          ", "", -1) + def.Description = strings.TrimSpace(html) + + return def +} + +func generateSchema(inputPath string) ([]byte, error) { + fset := token.NewFileSet() + node, err := parser.ParseFile(fset, inputPath, nil, parser.ParseComments) + if err != nil { + return nil, err + } + + definitions := &Definitions{} + + for _, i := range node.Decls { + declaration, ok := i.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range declaration.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + + name := typeSpec.Name.Name + definitions.Add(name, newDefinition(name, typeSpec.Type, declaration.Doc.Text())) + } + } + + // Inline anyOfs + for _, v := range definitions.values { + var options []*Definition + + for _, anyOf := range v.AnyOf { + ref := strings.TrimPrefix(anyOf.Ref, defPrefix) + referenced := definitions.values[ref] + + for _, key := range referenced.Properties.keys { + choice := &Definitions{} + choice.Add(key, referenced.Properties.values[key]) + + options = append(options, &Definition{ + Properties: choice, + }) + } + } + + v.AnyOf = options + v.AdditionalProperties = false + } + + schema := Schema{ + Definition: &Definition{ + Type: "object", + AnyOf: []*Definition{{ + Ref: defPrefix + "SkaffoldPipeline", + }}, + }, + Definitions: definitions, + } + + return toJSON(schema) +} + +// Make sure HTML description are not encoded +func toJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + + if err := encoder.Encode(v); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/hack/schemas/main_test.go b/hack/schemas/main_test.go new file mode 100644 index 00000000000..187179af5f9 --- /dev/null +++ b/hack/schemas/main_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with 
the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "testing" +) + +func TestSchemas(t *testing.T) { + same, err := generateSchemas("../..", true) + if err != nil { + t.Fatalf("unable to check json schemas: %v", err) + } + + if !same { + t.Fatal("json schema files are not up to date. Please run `make generate-schemas` and commit the changes.") + } +} diff --git a/integration/build_test.go b/integration/build_test.go new file mode 100644 index 00000000000..f7a0c99571c --- /dev/null +++ b/integration/build_test.go @@ -0,0 +1,67 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "os/exec" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" +) + +func TestBuild(t *testing.T) { + tests := []struct { + description string + dir string + args []string + }{ + { + description: "docker build", + dir: "testdata/build", + }, { + description: "git tagger", + dir: "testdata/tagPolicy", + args: []string{"-p", "gitCommit"}, + }, { + description: "sha256 tagger", + dir: "testdata/tagPolicy", + args: []string{"-p", "sha256"}, + }, { + description: "dateTime tagger", + dir: "testdata/tagPolicy", + args: []string{"-p", "dateTime"}, + }, { + description: "envTemplate tagger", + dir: "testdata/tagPolicy", + args: []string{"-p", "envTemplate"}, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + buildCmd := exec.Command("skaffold", append([]string{"build"}, test.args...)...) + buildCmd.Dir = test.dir + + out, err := util.RunCmdOut(buildCmd) + if err != nil { + t.Fatalf("testing error: %v, %s", err, out) + } + }) + } +} diff --git a/integration/config_test.go b/integration/config_test.go new file mode 100644 index 00000000000..91dc60eadb2 --- /dev/null +++ b/integration/config_test.go @@ -0,0 +1,173 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "os/exec" + "strings" + "testing" + + yaml "gopkg.in/yaml.v2" + + "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestListConfig(t *testing.T) { + baseConfig := &config.Config{ + Global: &config.ContextConfig{ + DefaultRepo: "global-repository", + }, + ContextConfigs: []*config.ContextConfig{ + { + Kubecontext: "test-context", + DefaultRepo: "context-local-repository", + }, + }, + } + + c, _ := yaml.Marshal(*baseConfig) + cfg, teardown := testutil.TempFile(t, "config", c) + defer teardown() + + type testListCase struct { + description string + kubectx string + expectedOutput []string + } + + var tests = []testListCase{ + { + description: "list for test-context", + kubectx: "test-context", + expectedOutput: []string{"default-repo: context-local-repository"}, + }, + { + description: "list all", + expectedOutput: []string{ + "global:", + "default-repo: global-repository", + "kube-context: test-context", + "default-repo: context-local-repository", + }, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + args := []string{"config", "list", "-c", cfg} + if test.kubectx != "" { + args = append(args, "-k", test.kubectx) + } else { + args = append(args, "--all") + } + cmd := exec.Command("skaffold", args...) 
+ rawOut, err := util.RunCmdOut(cmd) + if err != nil { + t.Error(err) + } + out := string(rawOut) + for _, output := range test.expectedOutput { + if !strings.Contains(out, output) { + t.Errorf("expected output %s not found in output: %s", output, out) + } + } + }) + } +} + +func TestSetConfig(t *testing.T) { + baseConfig := &config.Config{ + Global: &config.ContextConfig{ + DefaultRepo: "global-repository", + }, + ContextConfigs: []*config.ContextConfig{ + { + Kubecontext: "test-context", + DefaultRepo: "context-local-repository", + }, + }, + } + + c, _ := yaml.Marshal(*baseConfig) + cfg, teardown := testutil.TempFile(t, "config", c) + defer teardown() + + type testSetCase struct { + description string + kubectx string + key string + shouldErr bool + } + + var tests = []testSetCase{ + { + description: "set default-repo for context", + kubectx: "test-context", + key: "default-repo", + }, + { + description: "set global default-repo", + key: "default-repo", + }, + { + description: "fail to set unrecognized value", + key: "doubt-this-will-ever-be-a-config-value", + shouldErr: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + value := util.RandomID() + args := []string{"config", "set", test.key, value} + args = append(args, "-c", cfg) + if test.kubectx != "" { + args = append(args, "-k", test.kubectx) + } else { + args = append(args, "--global") + } + cmd := exec.Command("skaffold", args...) + if err := util.RunCmd(cmd); err != nil { + if test.shouldErr { + return + } + t.Error(err) + } + + listArgs := []string{"config", "list", "-c", cfg} + if test.kubectx != "" { + listArgs = append(listArgs, "-k", test.kubectx) + } else { + listArgs = append(listArgs, "--all") + } + listCmd := exec.Command("skaffold", listArgs...) 
+ out, err := util.RunCmdOut(listCmd) + if err != nil { + t.Error(err) + } + t.Log(string(out)) + if !strings.Contains(string(out), fmt.Sprintf("%s: %s", test.key, value)) { + t.Errorf("value %s not set correctly", test.key) + } + }) + } +} diff --git a/integration/deploy_test.go b/integration/deploy_test.go new file mode 100644 index 00000000000..5b2ce3a86ee --- /dev/null +++ b/integration/deploy_test.go @@ -0,0 +1,53 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "context" + "time" + + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "testing" + + kubernetesutil "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" +) + +func TestDeploy(t *testing.T) { + ns, deleteNs := SetupNamespace(t) + defer deleteNs() + + RunSkaffold(t, "deploy", "examples/kustomize", ns.Name, "", nil, "--images", "index.docker.io/library/busybox:1") + + depName := "kustomize-test" + if err := kubernetesutil.WaitForDeploymentToStabilize(context.Background(), Client, ns.Name, depName, 10*time.Minute); err != nil { + t.Fatalf("Timed out waiting for deployment to stabilize") + } + + dep, err := Client.AppsV1().Deployments(ns.Name).Get(depName, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("Could not find deployment: %s %s", ns.Name, depName) + } + + if dep.Spec.Template.Spec.Containers[0].Image != "index.docker.io/library/busybox:1" { + t.Fatalf("Wrong image name in kustomized deployment: %s", dep.Spec.Template.Spec.Containers[0].Image) + } + + RunSkaffold(t, "delete", "examples/kustomize", ns.Name, "", nil) +} diff --git a/integration/dev_test.go b/integration/dev_test.go new file mode 100644 index 00000000000..2e46ef68ff7 --- /dev/null +++ b/integration/dev_test.go @@ -0,0 +1,68 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "context" + "testing" + "time" + + kubernetesutil "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +func TestDev(t *testing.T) { + ns, deleteNs := SetupNamespace(t) + defer deleteNs() + + Run(t, "examples/test-dev-job", "touch", "foo") + defer Run(t, "examples/test-dev-job", "rm", "foo") + + cancel := make(chan bool) + go RunSkaffoldNoFail(cancel, "dev", "examples/test-dev-job", ns.Name, "", nil) + defer func() { cancel <- true }() + + jobName := "test-dev-job" + if err := kubernetesutil.WaitForJobToStabilize(context.Background(), Client, ns.Name, jobName, 10*time.Minute); err != nil { + t.Fatalf("Timed out waiting for job to stabilize") + } + + job, err := Client.BatchV1().Jobs(ns.Name).Get(jobName, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("Could not find job: %s %s", ns.Name, jobName) + } + + time.Sleep(5 * time.Second) + + // Make a change to foo so that dev is forced to delete the job and redeploy + Run(t, "examples/test-dev-job", "sh", "-c", "echo bar > foo") + + // Make sure the UID of the old Job and the UID of the new Job is different + err = wait.PollImmediate(time.Millisecond*500, 10*time.Minute, func() (bool, error) { + newJob, err := Client.BatchV1().Jobs(ns.Name).Get(job.Name, meta_v1.GetOptions{}) + if err != nil { + return false, nil + } + return job.GetUID() != newJob.GetUID(), nil + }) + if err != nil { + t.Fatalf("redeploy failed: %v", err) + } +} diff --git a/integration/examples/annotated-skaffold.yaml b/integration/examples/annotated-skaffold.yaml deleted file mode 100644 index 354b84695ba..00000000000 --- a/integration/examples/annotated-skaffold.yaml +++ /dev/null @@ -1,220 +0,0 @@ -apiVersion: skaffold/v1beta2 -kind: Config -build: - # tagPolicy (beta) determines how Skaffold is going to tag your images. 
- # We provide a few strategies here, although you most likely won't need to care! - # The policy can `gitCommit` (beta), `sha256` (beta), `envTemplate` (beta) or `dateTime` (beta). - # If not specified, it defaults to `gitCommit: {}`. - tagPolicy: - # gitCommit tags the image with the git commit of your current repository. - gitCommit: {} - - # sha256 tags the image with the checksum of the built image (image id). - # sha256: {} - - # envTemplate tags the image with a configurable template string. - # The template must be in the golang text/template syntax: https://golang.org/pkg/text/template/ - # The template is compiled and executed against the current environment, - # with those variables injected: - # IMAGE_NAME | Name of the image being built, as supplied in the artifacts section. - # DIGEST | Digest of the newly built image. For eg. `sha256:27ffc7f352665cc50ae3cbcc4b2725e36062f1b38c611b6f95d6df9a7510de23`. - # DIGEST_ALGO | Algorithm used by the digest: For eg. `sha256`. - # DIGEST_HEX | Digest of the newly built image. For eg. `27ffc7f352665cc50ae3cbcc4b2725e36062f1b38c611b6f95d6df9a7510de23`. - # Example - # envTemplate: - # template: "{{.RELEASE}}-{{.IMAGE_NAME}}" - - # dateTime tags the image with the build timestamp. - # The format can be overridden with golang formats, see: https://golang.org/pkg/time/#Time.Format - # Default format is "2006-01-02_15-04-05.999_MST - # The timezone is by default the local timezone, this can be overridden, see https://golang.org/pkg/time/#Time.LoadLocation - # dateTime: - # format: "2006-01-02" - # timezone: "UTC" - - # artifacts is a list of the actual images you're going to be building - # you can include as many as you want here. - artifacts: - # The name of the image to be built. - - image: gcr.io/k8s-skaffold/skaffold-example - # The path to your dockerfile context. Defaults to ".". 
- context: ../examples/getting-started - # Skaffold can sync local files with remote pods (alpha) instead - # of rebuilding the whole artifact's image. This is a mapping - # of local files to sync to remote folders. - # sync: - # '*.py': . - - # Each artifact is of a given type among: `docker` (beta), `bazel` (beta), `jibMaven` (alpha) and `jibGradle` (alpha). - # If not specified, it defaults to `docker: {}`. - docker: - # Dockerfile's location relative to workspace. Defaults to "Dockerfile" - dockerfile: Dockerfile - # Key/value arguements passed to the docker build. - buildArgs: - key1: "value1" - key2: "value2" - # Images to consider as cache sources - cacheFrom: - - image1 - - image2 - # Dockerfile target name to build. - # target: stageName - - # bazel requires bazel CLI to be installed and the artifacts sources to - # contain Bazel configuration files. - # bazel: - # target: //:skaffold_example.tar - # additional args to pass to `bazel build` - # args: - # - "arg1" - # - "arg2" - - # jibMaven builds containers using the Jib plugin for Maven. - # jibMaven: - # module: modulename # selects which maven module to build, for a multimodule project - # profile: profilename # selects which maven profile to activate - - # jibGradle builds containers using the Jib plugin for Gradle. - # jibGradle: - # project: projectname # selects which gradle project to build - -# This next section is where you'll put your specific builder configuration. - # Valid builders are `local` (beta), `googleCloudBuild` (beta) and `kaniko` (beta). - # Defaults to `local: {}` - - # Pushing the images can be skipped. If no value is specified, it'll default to - # `true` on minikube or Docker for Desktop, for even faster build and deploy cycles. - # `false` on other types of kubernetes clusters that require pushing the images. - # skaffold defers to your ~/.docker/config for authentication information. 
- # If you're using Google Container Registry, make sure that you have gcloud and - # docker-credentials-helper-gcr configured correctly. - # - # By default, the local builder connects to the Docker daemon with Go code to build - # images. If `useDockerCLI` is set, skaffold will simply shell out to the docker CLI. - # `useBuildkit` can also be set to activate the experimental BuildKit feature. - # - # local: - # false by default for local clusters, true for remote clusters - # push: false - # useDockerCLI: false - # useBuildkit: false - - # Docker artifacts can be built on Google Cloud Build. The projectId then needs - # to be provided and the currently logged user should be given permissions to trigger - # new builds on Cloud Build. - # If the projectId is not provided, Skaffold will try to guess it from the image name. - # For eg. If the artifact image name is gcr.io/myproject/image, then Skaffold will use - # the `myproject` GCP project. - # All the other parameters are also optional. The default values are listed here: - # googleCloudBuild: - # projectId: YOUR_PROJECT - # diskSizeGb: 200 - # machineType: "N1_HIGHCPU_8"|"N1_HIGHCPU_32" - # timeout: 10000s - # dockerImage: gcr.io/cloud-builders/docker - - # Docker artifacts can be built on a Kubernetes cluster with Kaniko. - # Exactly one buildContext must be specified to use kaniko - # If localDir is specified, skaffold will mount sources directly via a emptyDir volume - # If gcsBucket is specified, skaffold will send sources to the GCS bucket provided - # Kaniko also needs access to a service account to push the final image. - # See https://github.com/GoogleContainerTools/kaniko#running-kaniko-in-a-kubernetes-cluster - # If cache is specified, kaniko will use a remote cache which will speed up builds. - # A cache repo can be specified to store cached layers, otherwise one will be inferred - # from the image name. 
See https://github.com/GoogleContainerTools/kaniko#caching - # - # Additional flags can be specified as a list. To see all additional flags, visit: - # https://github.com/GoogleContainerTools/kaniko#additional-flags - # - # kaniko: - # buildContext: - # gcsBucket: k8s-skaffold - # localDir: {} - # cache: - # repo: gcr.io/my-project/skaffold/cache - # flags: - # - --aditional-flag - # pullSecret: /a/secret/path/serviceaccount.json - # pullSecretName: kaniko-secret - # namespace: default - # timeout: 20m - # image: defaults to the latest released version of `gcr.io/kaniko-project/executor` - -# The deploy section has all the information needed to deploy. Along with build: -# it is a required section. -deploy: - # The type of the deployment method can be `kubectl` (beta), `helm` (beta) or `kustomize` (beta). - - # The kubectl deployer uses a client side `kubectl apply` to apply the manifests to the cluster. - # You'll need a kubectl CLI version installed that's compatible with your cluster. - kubectl: - # manifests to deploy from files. - manifests: - - ../examples/getting-started/k8s-* - # kubectl can be passed additional option flags either on every command (Global), - # on creations (Apply) or deletions (Delete). - # flags: - # global: [""] - # apply: [""] - # delete: [""] - - # manifests to deploy from remote cluster. - # The path to where these manifests live in remote kubernetes cluster. - # Example - # remoteManifests: - # - deployment/web-app1 - # - namespace:deployment/web-app2 - - # kustomize: - # path: . - # kustomize deploys manifests with kubectl. - # kubectl can be passed additional option flags either on every command (Global), - # on creations (Apply) or deletions (Delete). - # flags: - # global: [""] - # apply: [""] - # delete: [""] - - # helm: - # helm releases to deploy. 
- # releases: - # - name: skaffold-helm - # chartPath: skaffold-helm - # valuesFiles: - # - first-values-file.yaml - # - second-values-file.yaml - # values: - # image: skaffold-helm - # namespace: skaffold - # version: "" - # recreatePods: false - # - # # Skip the helm dep build, required for charts with local dependencies - # skipDependencyBuild: false - # - # # setValues get appended to the helm deploy with --set. - # setValues: - # key: "value" - # - # # overrides builds an override values.yaml file to run with the helm deploy - # overrides: - # some: - # key: someValue - # - # # packaged section allows to package chart setting specific version - # # and/or appVersion using "helm package" command. - # packaged: - # # version is passed to "helm package --version" flag. - # # Note that you can specify both static string or dynamic template. - # version: {{ .CHART_VERSION }}-dirty - # # appVersion is passed to "helm package --app-version" flag. - # # Note that you can specify both static string or dynamic template. 
- # appVersion: {{ .CHART_VERSION }}-dirty - -# profiles (beta) section has all the profile information which can be used to override any build or deploy configuration -profiles: - - name: gcb - build: - googleCloudBuild: - projectId: k8s-skaffold diff --git a/integration/examples/bazel/WORKSPACE b/integration/examples/bazel/WORKSPACE index d095946df88..6f90ba50199 100644 --- a/integration/examples/bazel/WORKSPACE +++ b/integration/examples/bazel/WORKSPACE @@ -1,17 +1,18 @@ workspace(name = "skaffold") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") -git_repository( +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( name = "io_bazel_rules_docker", - remote = "https://github.com/bazelbuild/rules_docker.git", - tag = "v0.5.1", + strip_prefix = "rules_docker-0.7.0", + urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.7.0.tar.gz"], + sha256 = "aed1c249d4ec8f703edddf35cbe9dfaca0b5f5ea6e4cd9e83e99f3b0d1136c3d", ) -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "io_bazel_rules_go", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.2/rules_go-0.16.2.tar.gz"], - sha256 = "f87fa87475ea107b3c69196f39c82b7bbf58fe27c62a338684c20ca17d1d8613", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"], + sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705", ) load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") diff --git a/integration/examples/bazel/skaffold.yaml b/integration/examples/bazel/skaffold.yaml index 5c3d741c1d8..94fc6594c7d 100644 --- a/integration/examples/bazel/skaffold.yaml +++ b/integration/examples/bazel/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/getting-started/skaffold.yaml 
b/integration/examples/getting-started/skaffold.yaml index 24fed4bbcb0..2cb37c3b235 100644 --- a/integration/examples/getting-started/skaffold.yaml +++ b/integration/examples/getting-started/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/helm-deployment/skaffold.yaml b/integration/examples/helm-deployment/skaffold.yaml index 911148134cb..08738411bc2 100644 --- a/integration/examples/helm-deployment/skaffold.yaml +++ b/integration/examples/helm-deployment/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: tagPolicy: diff --git a/integration/examples/hot-reload/node/Dockerfile b/integration/examples/hot-reload/node/Dockerfile index c273ea94aa1..d3434e4ed8c 100644 --- a/integration/examples/hot-reload/node/Dockerfile +++ b/integration/examples/hot-reload/node/Dockerfile @@ -2,4 +2,4 @@ FROM gcr.io/k8s-skaffold/nodemon EXPOSE 3000 CMD ["nodemon","--legacy-watch", "server.js"] -COPY *.js . +COPY src ./ diff --git a/integration/examples/hot-reload/node/server.js b/integration/examples/hot-reload/node/src/server.js similarity index 100% rename from integration/examples/hot-reload/node/server.js rename to integration/examples/hot-reload/node/src/server.js diff --git a/integration/examples/hot-reload/python/Dockerfile b/integration/examples/hot-reload/python/Dockerfile index 84ba519f0bf..69130533813 100644 --- a/integration/examples/hot-reload/python/Dockerfile +++ b/integration/examples/hot-reload/python/Dockerfile @@ -5,5 +5,5 @@ ENV FLASK_APP=app.py COPY requirements.txt . RUN pip install -r requirements.txt -COPY *.py . 
+COPY src ./ diff --git a/integration/examples/hot-reload/python/app.py b/integration/examples/hot-reload/python/src/app.py similarity index 100% rename from integration/examples/hot-reload/python/app.py rename to integration/examples/hot-reload/python/src/app.py diff --git a/integration/examples/hot-reload/skaffold.yaml b/integration/examples/hot-reload/skaffold.yaml index 91936ffff4a..7d07bd8cb8f 100644 --- a/integration/examples/hot-reload/skaffold.yaml +++ b/integration/examples/hot-reload/skaffold.yaml @@ -1,15 +1,15 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: - image: gcr.io/k8s-skaffold/node-example context: node sync: - '*.js': . + '**/*.js': . - image: gcr.io/k8s-skaffold/python-reload context: python sync: - '*.py': . + '**/*.py': . deploy: kubectl: manifests: diff --git a/integration/examples/jib-multimodule/.gitignore b/integration/examples/jib-multimodule/.gitignore new file mode 100644 index 00000000000..9d26fe4575a --- /dev/null +++ b/integration/examples/jib-multimodule/.gitignore @@ -0,0 +1,5 @@ +target +.classpath +.project +.settings +.vscode diff --git a/integration/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java b/integration/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100755 index 00000000000..fa4f7b499fd --- /dev/null +++ b/integration/examples/jib-multimodule/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,110 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +*/ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if(mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if(mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: : " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if(!outputFile.getParentFile().exists()) { + if(!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar b/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar new file mode 
100755 index 00000000000..01e67997377 Binary files /dev/null and b/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.jar differ diff --git a/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties b/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties new file mode 100755 index 00000000000..cd0d451ccd6 --- /dev/null +++ b/integration/examples/jib-multimodule/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip diff --git a/integration/examples/jib-multimodule/README.adoc b/integration/examples/jib-multimodule/README.adoc new file mode 100644 index 00000000000..f1ea1431d23 --- /dev/null +++ b/integration/examples/jib-multimodule/README.adoc @@ -0,0 +1,51 @@ +=== Example: Jib Multi-Module +:icons: font + +Jib is one of the supported builders in Skaffold. +[Jib](https://github.com/GoogleContainerTools/jib) builds Docker and OCI images +for your Java applications and is available as plugins for Maven and Gradle. + +Sometimes a project is configured to have multiple modules to create several +container images. Skaffold can work with Jib to build these containers as +required. + +The way you configure it in `skaffold.yaml` is the following build stanza: + +[source,yaml] +---- +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-jib-1 + # context is the root of the multi-module project + context: . + jibMaven: + # module is either the relative location within the project (i.e., + # relative to `context`) or :artifactId or groupId:artifactId + module: moduleLocation + - image: gcr.io/k8s-skaffold/skaffold-jib-2 + context: . + jibMaven: + module: :artifactId +---- + +There are a few caveats: + + - The `jib-maven-plugin` must be either configured or referenced in the + root module of the project. This is easily done through a `pluginManagement` + block. 
+ + - The artifact modules must have a `jib:xxx` goal bound to the `package` phase. + +ifndef::env-github[] +==== link:{github-repo-tree}/examples/jib[Example files icon:github[]] + +[source,yaml, indent=3, title=skaffold.yaml] +---- +include::skaffold.yaml[] +---- + +[source,xml, indent=3, title=pom.xml, syntax=xml] +---- +include::pom.xml[] +---- +endif::[] diff --git a/integration/examples/jib-multimodule/k8s/web.yaml b/integration/examples/jib-multimodule/k8s/web.yaml new file mode 100644 index 00000000000..7e3145c0158 --- /dev/null +++ b/integration/examples/jib-multimodule/k8s/web.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web1 +spec: + selector: + matchLabels: + app: web1 + template: + metadata: + labels: + app: web1 + spec: + containers: + - name: web1 + image: gcr.io/k8s-skaffold/skaffold-jib-1 + ports: + - containerPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web2 +spec: + selector: + matchLabels: + app: web2 + template: + metadata: + labels: + app: web2 + spec: + containers: + - name: web2 + image: gcr.io/k8s-skaffold/skaffold-jib-2 + ports: + - containerPort: 8080 diff --git a/integration/examples/jib-multimodule/mvnw b/integration/examples/jib-multimodule/mvnw new file mode 100755 index 00000000000..5551fde8e7d --- /dev/null +++ b/integration/examples/jib-multimodule/mvnw @@ -0,0 +1,286 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + wget "$jarUrl" -O "$wrapperJarPath" + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + curl -o "$wrapperJarPath" "$jarUrl" + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." 
+ fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/integration/examples/jib-multimodule/mvnw.cmd b/integration/examples/jib-multimodule/mvnw.cmd new file mode 100755 index 00000000000..48363fa60b9 --- /dev/null +++ b/integration/examples/jib-multimodule/mvnw.cmd @@ -0,0 +1,161 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" +FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + echo Found %WRAPPER_JAR% +) else ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %DOWNLOAD_URL% + powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" + echo Finished downloading %WRAPPER_JAR% +) +@REM End of extension + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/integration/examples/jib-multimodule/pom.xml b/integration/examples/jib-multimodule/pom.xml new file mode 100644 index 00000000000..aa28047971b --- /dev/null +++ b/integration/examples/jib-multimodule/pom.xml @@ -0,0 +1,46 @@ + + + 4.0.0 + + org.skaffold + parent + 0.1.0 + Multi-Module Example with Skaffold and Jib + pom + + + org.springframework.boot + spring-boot-starter-parent + 2.0.5.RELEASE + + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter-web + + + + + project1 + project2 + + + + + + + com.google.cloud.tools + jib-maven-plugin + 1.0.0 + + + + + diff --git a/integration/examples/jib-multimodule/project1/pom.xml b/integration/examples/jib-multimodule/project1/pom.xml new file mode 100644 index 00000000000..d78cdd80d0a --- /dev/null +++ b/integration/examples/jib-multimodule/project1/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + + + org.skaffold + parent + 0.1.0 + + + skaffold-project-1 + 0.1.0 + Project 1: Spring Boot with Skaffold and Jib + + + 1.8 + + + + hello1 + + + 
org.springframework.boot + spring-boot-maven-plugin + + + com.google.cloud.tools + jib-maven-plugin + + + gcr.io/k8s-skaffold/project1 + + + + -Djava.security.egd=file:/dev/./urandom + + -XX:+UnlockExperimentalVMOptions + -XX:+UseCGroupMemoryLimitForHeap + + + + + + + default-package + package + + build + + + + + + + diff --git a/integration/examples/jib-multimodule/project1/src/main/java/hello/Application.java b/integration/examples/jib-multimodule/project1/src/main/java/hello/Application.java new file mode 100644 index 00000000000..5d77995503e --- /dev/null +++ b/integration/examples/jib-multimodule/project1/src/main/java/hello/Application.java @@ -0,0 +1,11 @@ +package hello; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application { + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} \ No newline at end of file diff --git a/integration/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java b/integration/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java new file mode 100644 index 00000000000..4121e5c6d11 --- /dev/null +++ b/integration/examples/jib-multimodule/project1/src/main/java/hello/HelloController.java @@ -0,0 +1,12 @@ +package hello; + +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.bind.annotation.RequestMapping; + +@RestController +public class HelloController { + @RequestMapping("/") + public String index() { + return "Hello from project1!"; + } +} diff --git a/integration/examples/jib-multimodule/project2/pom.xml b/integration/examples/jib-multimodule/project2/pom.xml new file mode 100644 index 00000000000..d08fba19b27 --- /dev/null +++ b/integration/examples/jib-multimodule/project2/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + + + org.skaffold + parent + 0.1.0 + + + skaffold-project-2 + 0.1.0 + 
Project 2: Spring Boot with Skaffold and Jib + + + 1.8 + + + + hello2 + + + org.springframework.boot + spring-boot-maven-plugin + + + com.google.cloud.tools + jib-maven-plugin + + + gcr.io/k8s-skaffold/project2 + + + + -Djava.security.egd=file:/dev/./urandom + + -XX:+UnlockExperimentalVMOptions + -XX:+UseCGroupMemoryLimitForHeap + + + + + + + default-package + package + + build + + + + + + + diff --git a/integration/examples/jib-multimodule/project2/src/main/java/hello/Application.java b/integration/examples/jib-multimodule/project2/src/main/java/hello/Application.java new file mode 100644 index 00000000000..5d77995503e --- /dev/null +++ b/integration/examples/jib-multimodule/project2/src/main/java/hello/Application.java @@ -0,0 +1,11 @@ +package hello; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application { + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} \ No newline at end of file diff --git a/integration/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java b/integration/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java new file mode 100644 index 00000000000..0417f2cd760 --- /dev/null +++ b/integration/examples/jib-multimodule/project2/src/main/java/hello/HelloController.java @@ -0,0 +1,12 @@ +package hello; + +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.bind.annotation.RequestMapping; + +@RestController +public class HelloController { + @RequestMapping("/") + public String index() { + return "Hello from project2!"; + } +} diff --git a/integration/examples/jib-multimodule/skaffold.yaml b/integration/examples/jib-multimodule/skaffold.yaml new file mode 100644 index 00000000000..5603852e1b1 --- /dev/null +++ b/integration/examples/jib-multimodule/skaffold.yaml @@ -0,0 +1,18 @@ +apiVersion: 
skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-jib-1 + jibMaven: + # module can be the relative directory + module: project1 + - image: gcr.io/k8s-skaffold/skaffold-jib-2 + jibMaven: + # module can be [groupId]:artifactId + module: :skaffold-project-2 + +# optional profile to run the jib build on Google Cloud Build +profiles: + - name: gcb + build: + googleCloudBuild: {} diff --git a/integration/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java b/integration/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100755 index 00000000000..fa4f7b499fd --- /dev/null +++ b/integration/examples/jib/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,110 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +*/ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. 
+ */ + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. + File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if(mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if(mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... 
+ } + } + } + System.out.println("- Downloading from: : " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if(!outputFile.getParentFile().exists()) { + if(!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/integration/examples/jib/.mvn/wrapper/maven-wrapper.jar b/integration/examples/jib/.mvn/wrapper/maven-wrapper.jar new file mode 100755 index 00000000000..01e67997377 Binary files /dev/null and b/integration/examples/jib/.mvn/wrapper/maven-wrapper.jar differ diff --git a/integration/examples/jib/.mvn/wrapper/maven-wrapper.properties b/integration/examples/jib/.mvn/wrapper/maven-wrapper.properties new file mode 100755 index 00000000000..cd0d451ccd6 --- /dev/null +++ b/integration/examples/jib/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip diff --git a/integration/examples/jib/k8s/web.yaml b/integration/examples/jib/k8s/web.yaml index 34c04f9b2b8..e0382bd2ac9 100644 --- a/integration/examples/jib/k8s/web.yaml +++ b/integration/examples/jib/k8s/web.yaml @@ -26,4 +26,5 @@ spec: containers: - name: web image: 
gcr.io/k8s-skaffold/skaffold-jib - + ports: + - containerPort: 8080 diff --git a/integration/examples/jib/mvnw b/integration/examples/jib/mvnw new file mode 100755 index 00000000000..5551fde8e7d --- /dev/null +++ b/integration/examples/jib/mvnw @@ -0,0 +1,286 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . 
"$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + wget "$jarUrl" -O "$wrapperJarPath" + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + curl -o "$wrapperJarPath" "$jarUrl" + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." 
+ fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/integration/examples/jib/mvnw.cmd b/integration/examples/jib/mvnw.cmd new file mode 100755 index 00000000000..48363fa60b9 --- /dev/null +++ b/integration/examples/jib/mvnw.cmd @@ -0,0 +1,161 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" +FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + echo Found %WRAPPER_JAR% +) else ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %DOWNLOAD_URL% + powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" + echo Finished downloading %WRAPPER_JAR% +) +@REM End of extension + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/integration/examples/jib/pom.xml b/integration/examples/jib/pom.xml index 83b6ccf58cf..2d82fb02864 100644 --- a/integration/examples/jib/pom.xml +++ b/integration/examples/jib/pom.xml @@ -34,11 +34,12 @@ com.google.cloud.tools jib-maven-plugin - 0.10.1 + 1.0.0 -Djava.security.egd=file:/dev/./urandom + -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap diff --git a/integration/examples/jib/skaffold.yaml b/integration/examples/jib/skaffold.yaml index c2ca96aa072..371e8e5bc0d 100644 --- a/integration/examples/jib/skaffold.yaml +++ b/integration/examples/jib/skaffold.yaml @@ -1,6 +1,12 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: - image: gcr.io/k8s-skaffold/skaffold-jib jibMaven: {} + +# optional profile to run the jib build on Google Cloud Build +profiles: + - name: gcb + build: + googleCloudBuild: {} diff --git a/integration/examples/kaniko-local/skaffold.yaml b/integration/examples/kaniko-local/skaffold.yaml index 
f3d5b6f123b..df5be83e7f8 100644 --- a/integration/examples/kaniko-local/skaffold.yaml +++ b/integration/examples/kaniko-local/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/kaniko/skaffold.yaml b/integration/examples/kaniko/skaffold.yaml index 827c2d894ef..b62bc62dab9 100644 --- a/integration/examples/kaniko/skaffold.yaml +++ b/integration/examples/kaniko/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/kustomize/skaffold.yaml b/integration/examples/kustomize/skaffold.yaml index 48bc3a8739a..f6080f60561 100644 --- a/integration/examples/kustomize/skaffold.yaml +++ b/integration/examples/kustomize/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config deploy: kustomize: {} diff --git a/integration/examples/microservices/skaffold.yaml b/integration/examples/microservices/skaffold.yaml index 1f40696dc67..a8b6a457407 100644 --- a/integration/examples/microservices/skaffold.yaml +++ b/integration/examples/microservices/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/nodejs/skaffold.yaml b/integration/examples/nodejs/skaffold.yaml index 94d1b433250..fd6616f52e8 100644 --- a/integration/examples/nodejs/skaffold.yaml +++ b/integration/examples/nodejs/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/structure-tests/skaffold.yaml b/integration/examples/structure-tests/skaffold.yaml index 028409de9fe..f69949f65a8 100644 --- a/integration/examples/structure-tests/skaffold.yaml +++ b/integration/examples/structure-tests/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 
kind: Config build: artifacts: diff --git a/integration/examples/tagging-with-environment-variables/skaffold.yaml b/integration/examples/tagging-with-environment-variables/skaffold.yaml index 3437f0db666..f7ba0802d6d 100644 --- a/integration/examples/tagging-with-environment-variables/skaffold.yaml +++ b/integration/examples/tagging-with-environment-variables/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/test-dev-job/skaffold.yaml b/integration/examples/test-dev-job/skaffold.yaml index 51ae3915e28..84c448ec596 100644 --- a/integration/examples/test-dev-job/skaffold.yaml +++ b/integration/examples/test-dev-job/skaffold.yaml @@ -1,4 +1,4 @@ -apiVersion: skaffold/v1beta2 +apiVersion: skaffold/v1beta5 kind: Config build: artifacts: diff --git a/integration/examples/test-plugin/gcb/leeroy-app/Dockerfile b/integration/examples/test-plugin/gcb/leeroy-app/Dockerfile new file mode 100644 index 00000000000..1c02a3644d9 --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-app/Dockerfile @@ -0,0 +1,9 @@ +FROM golang:1.10.1-alpine3.7 as builder +COPY app.go . +RUN go build -o /app . + +FROM alpine:3.7 as targetStage +CMD ["./app"] +COPY --from=builder /app . 
+ +FROM busybox as unusedStage diff --git a/integration/examples/test-plugin/gcb/leeroy-app/app.go b/integration/examples/test-plugin/gcb/leeroy-app/app.go new file mode 100644 index 00000000000..40f9c08afac --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-app/app.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + "log" + "net/http" +) + +func handler(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "leeroooooy app!!\n") +} + +func main() { + log.Print("leeroy app server ready") + http.HandleFunc("/", handler) + http.ListenAndServe(":50051", nil) +} diff --git a/integration/examples/test-plugin/gcb/leeroy-app/kubernetes/deployment.yaml b/integration/examples/test-plugin/gcb/leeroy-app/kubernetes/deployment.yaml new file mode 100644 index 00000000000..56ef7152fef --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-app/kubernetes/deployment.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: leeroy-app + labels: + app: leeroy-app +spec: + clusterIP: None + ports: + - port: 50051 + name: leeroy-app + selector: + app: leeroy-app +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: leeroy-app + labels: + app: leeroy-app +spec: + replicas: 1 + selector: + matchLabels: + app: leeroy-app + template: + metadata: + labels: + app: leeroy-app + spec: + containers: + - name: leeroy-app + image: gcr.io/k8s-skaffold/leeroy-app + ports: + - containerPort: 50051 diff --git a/integration/examples/test-plugin/gcb/leeroy-web/Dockerfile b/integration/examples/test-plugin/gcb/leeroy-web/Dockerfile new file mode 100644 index 00000000000..fba8dd8007b --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-web/Dockerfile @@ -0,0 +1,7 @@ +FROM golang:1.10.1-alpine3.7 as builder +COPY web.go . +RUN go build -o /web . + +FROM alpine:3.7 +CMD ["./web"] +COPY --from=builder /web . 
diff --git a/integration/examples/test-plugin/gcb/leeroy-web/kubernetes/deployment.yaml b/integration/examples/test-plugin/gcb/leeroy-web/kubernetes/deployment.yaml new file mode 100644 index 00000000000..fd8ad0bc032 --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-web/kubernetes/deployment.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: leeroy-web + labels: + app: leeroy-web +spec: + type: NodePort + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + name: leeroy-web + selector: + app: leeroy-web +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: leeroy-web + labels: + app: leeroy-web +spec: + replicas: 1 + selector: + matchLabels: + app: leeroy-web + template: + metadata: + labels: + app: leeroy-web + spec: + containers: + - name: leeroy-web + image: gcr.io/k8s-skaffold/leeroy-web + ports: + - containerPort: 8080 diff --git a/integration/examples/test-plugin/gcb/leeroy-web/web.go b/integration/examples/test-plugin/gcb/leeroy-web/web.go new file mode 100644 index 00000000000..419fd62e067 --- /dev/null +++ b/integration/examples/test-plugin/gcb/leeroy-web/web.go @@ -0,0 +1,26 @@ +package main + +import ( + "io" + "net/http" + + "log" +) + +func handler(w http.ResponseWriter, r *http.Request) { + resp, err := http.Get("http://leeroy-app:50051") + if err != nil { + panic(err) + } + defer resp.Body.Close() + if _, err := io.Copy(w, resp.Body); err != nil { + panic(err) + } + +} + +func main() { + log.Print("leeroy web server ready") + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} diff --git a/integration/examples/test-plugin/gcb/skaffold.yaml b/integration/examples/test-plugin/gcb/skaffold.yaml new file mode 100644 index 00000000000..54424f8de07 --- /dev/null +++ b/integration/examples/test-plugin/gcb/skaffold.yaml @@ -0,0 +1,21 @@ +apiVersion: skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/leeroy-web + context: ./leeroy-web/ + plugin: + name: docker + 
- image: gcr.io/k8s-skaffold/leeroy-app + context: ./leeroy-app/ + plugin: + name: docker + properties: + target: targetStage + executionEnvironment: + name: googleCloudBuild +deploy: + kubectl: + manifests: + - ./leeroy-web/kubernetes/* + - ./leeroy-app/kubernetes/* diff --git a/integration/examples/test-plugin/local/bazel/BUILD b/integration/examples/test-plugin/local/bazel/BUILD new file mode 100644 index 00000000000..ba00afe13d3 --- /dev/null +++ b/integration/examples/test-plugin/local/bazel/BUILD @@ -0,0 +1,9 @@ +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_image( + name = "skaffold_example", + srcs = ["main.go"], + goos = "linux", + goarch = "amd64", + static = "on", +) diff --git a/integration/examples/test-plugin/local/bazel/WORKSPACE b/integration/examples/test-plugin/local/bazel/WORKSPACE new file mode 100644 index 00000000000..6f90ba50199 --- /dev/null +++ b/integration/examples/test-plugin/local/bazel/WORKSPACE @@ -0,0 +1,30 @@ +workspace(name = "skaffold") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_docker", + strip_prefix = "rules_docker-0.7.0", + urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.7.0.tar.gz"], + sha256 = "aed1c249d4ec8f703edddf35cbe9dfaca0b5f5ea6e4cd9e83e99f3b0d1136c3d", +) + +http_archive( + name = "io_bazel_rules_go", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"], + sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705", +) + +load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") + +go_rules_dependencies() +go_register_toolchains( + go_version = "1.10.1", +) + +load( + "@io_bazel_rules_docker//go:image.bzl", + _go_image_repos = "repositories", +) + +_go_image_repos() \ No newline at end of file diff --git a/integration/examples/test-plugin/local/bazel/k8s/k8s-pod.yaml 
b/integration/examples/test-plugin/local/bazel/k8s/k8s-pod.yaml new file mode 100644 index 00000000000..2ecd153fb4f --- /dev/null +++ b/integration/examples/test-plugin/local/bazel/k8s/k8s-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bazel +spec: + containers: + - name: bazel + image: gcr.io/k8s-skaffold/skaffold-bazel diff --git a/integration/examples/test-plugin/local/bazel/main.go b/integration/examples/test-plugin/local/bazel/main.go new file mode 100644 index 00000000000..8f98567919a --- /dev/null +++ b/integration/examples/test-plugin/local/bazel/main.go @@ -0,0 +1,13 @@ +package main + +import ( + "fmt" + "time" +) + +func main() { + for { + fmt.Println("Hello bazel!!!!") + time.Sleep(time.Second * 1) + } +} diff --git a/integration/examples/test-plugin/local/bazel/skaffold.yaml b/integration/examples/test-plugin/local/bazel/skaffold.yaml new file mode 100644 index 00000000000..283a2c13e78 --- /dev/null +++ b/integration/examples/test-plugin/local/bazel/skaffold.yaml @@ -0,0 +1,10 @@ +apiVersion: skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-bazel + context: . + plugin: + name: bazel + properties: + target: //:skaffold_example.tar diff --git a/integration/fix_test.go b/integration/fix_test.go new file mode 100644 index 00000000000..75cf7b5f51b --- /dev/null +++ b/integration/fix_test.go @@ -0,0 +1,47 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bytes" + "os/exec" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" +) + +func TestFix(t *testing.T) { + ns, deleteNs := SetupNamespace(t) + defer deleteNs() + + fixCmd := exec.Command("skaffold", "fix", "-f", "skaffold.yaml") + fixCmd.Dir = "testdata/fix" + out, err := util.RunCmdOut(fixCmd) + if err != nil { + t.Fatalf("testing error: %v", err) + } + + runCmd := exec.Command("skaffold", "run", "--namespace", ns.Name, "-f", "-") + runCmd.Dir = "testdata/fix" + runCmd.Stdin = bytes.NewReader(out) + + if err := util.RunCmd(runCmd); err != nil { + t.Fatalf("testing error: %v", err) + } +} diff --git a/integration/init_test.go b/integration/init_test.go new file mode 100644 index 00000000000..445ceb45bd3 --- /dev/null +++ b/integration/init_test.go @@ -0,0 +1,118 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" +) + +func TestInit(t *testing.T) { + type testCase struct { + name string + dir string + args []string + skipSkaffoldYaml bool + } + + tests := []testCase{ + { + name: "getting-started", + dir: "../examples/getting-started", + }, + { + name: "microservices", + dir: "../examples/microservices", + args: []string{ + "-a", "leeroy-app/Dockerfile=gcr.io/k8s-skaffold/leeroy-app", + "-a", "leeroy-web/Dockerfile=gcr.io/k8s-skaffold/leeroy-web", + }, + }, + { + name: "compose", + dir: "../examples/compose", + args: []string{"--compose-file", "docker-compose.yaml"}, + skipSkaffoldYaml: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if !test.skipSkaffoldYaml { + oldYamlPath := filepath.Join(test.dir, "skaffold.yaml") + oldYaml, err := removeOldSkaffoldYaml(oldYamlPath) + if err != nil { + t.Fatalf("removing original skaffold.yaml: %s", err) + } + defer restoreOldSkaffoldYaml(oldYaml, oldYamlPath) + } + + generatedYaml := "skaffold.yaml.out" + defer func() { + err := os.Remove(filepath.Join(test.dir, generatedYaml)) + if err != nil { + t.Errorf("error removing generated skaffold yaml: %v", err) + } + }() + initArgs := []string{"init", "--force", "-f", generatedYaml} + initArgs = append(initArgs, test.args...) + initCmd := exec.Command("skaffold", initArgs...) 
+ initCmd.Dir = test.dir + + out, err := util.RunCmdOut(initCmd) + if err != nil { + t.Fatalf("running init: %v, output: %s", err, out) + } + + runCmd := exec.Command("skaffold", "run", "-f", generatedYaml) + runCmd.Dir = test.dir + out, err = util.RunCmdOut(runCmd) + if err != nil { + t.Fatalf("running skaffold on generated yaml: %v, output: %s", err, out) + } + }) + } +} + +func removeOldSkaffoldYaml(path string) ([]byte, error) { + skaffoldYaml, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + if err = os.Remove(path); err != nil { + return nil, err + } + return skaffoldYaml, nil +} + +func restoreOldSkaffoldYaml(contents []byte, path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + if _, err := f.Write(contents); err != nil { + return err + } + return nil +} diff --git a/integration/main_test.go b/integration/main_test.go new file mode 100644 index 00000000000..5e94db9a359 --- /dev/null +++ b/integration/main_test.go @@ -0,0 +1,59 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "flag" + "os" + "os/exec" + "testing" + + kubernetesutil "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/sirupsen/logrus" +) + +var ( + gkeZone = flag.String("gke-zone", "us-central1-a", "gke zone") + gkeClusterName = flag.String("gke-cluster-name", "integration-tests", "name of the integration test cluster") + gcpProject = flag.String("gcp-project", "k8s-skaffold", "the gcp project where the integration test cluster lives") + remote = flag.Bool("remote", false, "if true, run tests on a remote GKE cluster") + + // Client kubernetes.Interface +) + +func TestMain(m *testing.M) { + flag.Parse() + if *remote { + cmd := exec.Command("gcloud", "container", "clusters", "get-credentials", *gkeClusterName, "--zone", *gkeZone, "--project", *gcpProject) + if err := util.RunCmd(cmd); err != nil { + logrus.Fatalf("Error authenticating to GKE cluster stdout: %v", err) + } + } + + var err error + Client, err = kubernetesutil.GetClientset() + if err != nil { + logrus.Fatalf("Test setup error: getting kubernetes client: %s", err) + } + + exitCode := m.Run() + + os.Exit(exitCode) +} diff --git a/integration/run_test.go b/integration/run_test.go index 07c28e32e19..c8e8077d2e5 100644 --- a/integration/run_test.go +++ b/integration/run_test.go @@ -1,7 +1,7 @@ // +build integration /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,611 +19,115 @@ limitations under the License. 
package integration import ( - "bytes" "context" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" "testing" "time" - "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd/config" kubernetesutil "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" - "github.com/GoogleContainerTools/skaffold/testutil" - - "github.com/sirupsen/logrus" - yaml "gopkg.in/yaml.v2" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -var ( - gkeZone = flag.String("gke-zone", "us-central1-a", "gke zone") - gkeClusterName = flag.String("gke-cluster-name", "integration-tests", "name of the integration test cluster") - gcpProject = flag.String("gcp-project", "k8s-skaffold", "the gcp project where the integration test cluster lives") - remote = flag.Bool("remote", false, "if true, run tests on a remote GKE cluster") - - client kubernetes.Interface ) -func TestMain(m *testing.M) { - flag.Parse() - if *remote { - cmd := exec.Command("gcloud", "container", "clusters", "get-credentials", *gkeClusterName, "--zone", *gkeZone, "--project", *gcpProject) - if err := util.RunCmd(cmd); err != nil { - logrus.Fatalf("Error authenticating to GKE cluster stdout: %v", err) - } - } - - var err error - client, err = kubernetesutil.GetClientset() - if err != nil { - logrus.Fatalf("Test setup error: getting kubernetes client: %s", err) - } - - exitCode := m.Run() - - os.Exit(exitCode) -} - func TestRun(t *testing.T) { - type testRunCase struct { - description string - dir string - filename string - args []string - deployments []string - pods []string - deploymentValidation func(t *testing.T, d *appsv1.Deployment) - env []string - - remoteOnly bool - } - - var testCases = []testRunCase{ - { - description: "getting-started example", - args: 
[]string{"run"}, - pods: []string{"getting-started"}, + tests := []struct { + description string + dir string + filename string + args []string + deployments []string + pods []string + env []string + remoteOnly bool + }{ + { + description: "getting-started", dir: "examples/getting-started", - }, - { - description: "annotated getting-started example", - args: []string{"run"}, - filename: "annotated-skaffold.yaml", pods: []string{"getting-started"}, - dir: "examples", - }, - { - description: "getting-started envTagger", - args: []string{"run"}, + }, { + description: "nodejs", + dir: "examples/nodejs", + pods: []string{"node"}, + }, { + description: "structure-tests", + dir: "examples/structure-tests", pods: []string{"getting-started"}, + }, { + description: "microservices", + dir: "examples/microservices", + deployments: []string{"leeroy-app", "leeroy-web"}, + }, { + description: "envTagger", dir: "examples/tagging-with-environment-variables", - env: []string{"FOO=foo"}, - }, - { - description: "gcb builder example", - args: []string{"run", "-p", "gcb"}, pods: []string{"getting-started"}, + env: []string{"FOO=foo"}, + }, { + description: "bazel", + dir: "examples/bazel", + pods: []string{"bazel"}, + }, { + description: "Google Cloud Build", dir: "examples/structure-tests", + args: []string{"-p", "gcb"}, + pods: []string{"getting-started"}, remoteOnly: true, - }, - { - description: "deploy kustomize", - args: []string{"deploy", "--images", "index.docker.io/library/busybox:1"}, - deployments: []string{"kustomize-test"}, - deploymentValidation: func(t *testing.T, d *appsv1.Deployment) { - if d == nil { - t.Fatalf("Could not find deployment") - } - if d.Spec.Template.Spec.Containers[0].Image != "index.docker.io/library/busybox:1" { - t.Fatalf("Wrong image name in kustomized deployment: %s", d.Spec.Template.Spec.Containers[0].Image) - } - }, - dir: "examples/kustomize", - }, - { - description: "bazel example", - args: []string{"run"}, - pods: []string{"bazel"}, - dir: 
"examples/bazel", - }, - { - description: "kaniko example", - args: []string{"run"}, - pods: []string{"getting-started-kaniko"}, - dir: "examples/kaniko", + }, { + description: "Google Cloud Build - sub folder", + dir: "testdata/gcb-sub-folder", + pods: []string{"getting-started"}, remoteOnly: true, - }, - { - description: "kaniko local example", - args: []string{"run"}, + }, { + description: "kaniko", + dir: "examples/kaniko", pods: []string{"getting-started-kaniko"}, + remoteOnly: true, + }, { + description: "kaniko local", dir: "examples/kaniko-local", + pods: []string{"getting-started-kaniko"}, remoteOnly: true, - }, - { - description: "helm example", - args: []string{"run"}, - deployments: []string{"skaffold-helm"}, + }, { + description: "kaniko local - sub folder", + dir: "testdata/kaniko-sub-folder", + pods: []string{"getting-started-kaniko"}, + remoteOnly: true, + }, { + description: "helm", dir: "examples/helm-deployment", + deployments: []string{"skaffold-helm"}, remoteOnly: true, + }, { + description: "docker plugin in gcb exec environment", + dir: "examples/test-plugin/gcb", + deployments: []string{"leeroy-app", "leeroy-web"}, + }, { + description: "bazel plugin in local exec environment", + dir: "examples/test-plugin/local/bazel", + pods: []string{"bazel"}, }, } - for _, testCase := range testCases { - t.Run(testCase.description, func(t *testing.T) { - if !*remote && testCase.remoteOnly { + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + if !*remote && test.remoteOnly { t.Skip("skipping remote only test") } - ns, deleteNs := setupNamespace(t) + ns, deleteNs := SetupNamespace(t) defer deleteNs() - args := []string{} - args = append(args, testCase.args...) - args = append(args, "--namespace", ns.Name) - if testCase.filename != "" { - args = append(args, "-f", testCase.filename) - } - - cmd := exec.Command("skaffold", args...) - cmd.Env = append(os.Environ(), testCase.env...) 
- cmd.Dir = testCase.dir - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("skaffold: %s %v", output, err) - } + RunSkaffold(t, "run", test.dir, ns.Name, test.filename, test.env) - for _, p := range testCase.pods { - if err := kubernetesutil.WaitForPodReady(context.Background(), client.CoreV1().Pods(ns.Name), p); err != nil { + for _, p := range test.pods { + if err := kubernetesutil.WaitForPodReady(context.Background(), Client.CoreV1().Pods(ns.Name), p); err != nil { t.Fatalf("Timed out waiting for pod ready") } } - for _, d := range testCase.deployments { - if err := kubernetesutil.WaitForDeploymentToStabilize(context.Background(), client, ns.Name, d, 10*time.Minute); err != nil { + for _, d := range test.deployments { + if err := kubernetesutil.WaitForDeploymentToStabilize(context.Background(), Client, ns.Name, d, 10*time.Minute); err != nil { t.Fatalf("Timed out waiting for deployment to stabilize") } - if testCase.deploymentValidation != nil { - deployment, err := client.AppsV1().Deployments(ns.Name).Get(d, meta_v1.GetOptions{}) - if err != nil { - t.Fatalf("Could not find deployment: %s %s", ns.Name, d) - } - testCase.deploymentValidation(t, deployment) - } } - // Cleanup - args = []string{"delete", "--namespace", ns.Name} - if testCase.filename != "" { - args = append(args, "-f", testCase.filename) - } - cmd = exec.Command("skaffold", args...) 
- cmd.Dir = testCase.dir - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("skaffold delete: %s %v", output, err) - } + RunSkaffold(t, "delete", test.dir, ns.Name, test.filename, test.env) }) } } - -func TestDev(t *testing.T) { - type testDevCase struct { - description string - dir string - args []string - setup func(t *testing.T) func(t *testing.T) - postSetup func(t *testing.T) func(t *testing.T) - pods []string - jobs []string - jobValidation func(t *testing.T, ns *v1.Namespace, j *batchv1.Job) - validation func(t *testing.T, ns *v1.Namespace) - } - - testCases := []testDevCase{ - { - description: "delete and redeploy job", - dir: "examples/test-dev-job", - args: []string{"dev"}, - setup: func(t *testing.T) func(t *testing.T) { - // create foo - cmd := exec.Command("touch", "examples/test-dev-job/foo") - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("creating foo: %s %v", output, err) - } - return func(t *testing.T) { - // delete foo - cmd := exec.Command("rm", "examples/test-dev-job/foo") - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("creating foo: %s %v", output, err) - } - } - }, - jobs: []string{ - "test-dev-job", - }, - jobValidation: func(t *testing.T, ns *v1.Namespace, j *batchv1.Job) { - originalUID := j.GetUID() - // Make a change to foo so that dev is forced to delete the job and redeploy - cmd := exec.Command("sh", "-c", "echo bar > examples/test-dev-job/foo") - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("creating bar: %s %v", output, err) - } - // Make sure the UID of the old Job and the UID of the new Job is different - err := wait.PollImmediate(time.Millisecond*500, 10*time.Minute, func() (bool, error) { - newJob, err := client.BatchV1().Jobs(ns.Name).Get(j.Name, meta_v1.GetOptions{}) - if err != nil { - return false, nil - } - return originalUID != newJob.GetUID(), nil - }) - if err != nil { - t.Fatalf("redeploy failed: %v", err) - } - }, - }, - { - description: "create a 
directory with file sync", - dir: "examples/test-file-sync", - args: []string{"dev"}, - pods: []string{"test-file-sync"}, - postSetup: func(t *testing.T) func(t *testing.T) { - cmd := exec.Command("mkdir", "-p", "test") - cmd.Dir = "examples/test-file-sync" - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("creating test dir: %s %v", output, err) - } - cmd = exec.Command("touch", "test/foobar") - cmd.Dir = "examples/test-file-sync" - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("creating test/foo: %s %v", output, err) - } - return func(t *testing.T) { - cmd := exec.Command("rm", "-rf", "test") - cmd.Dir = "examples/test-file-sync" - if output, err := util.RunCmdOut(cmd); err != nil { - t.Fatalf("removing test dir: %s %v", output, err) - } - } - }, - validation: func(t *testing.T, ns *v1.Namespace) { - // try to run this command successfully for one minute - err := wait.PollImmediate(time.Millisecond*500, 1*time.Minute, func() (bool, error) { - cmd := exec.Command("kubectl", "exec", "test-file-sync", "-n", ns.Name, "--", "ls", "/test") - _, err := util.RunCmdOut(cmd) - return err == nil, nil - }) - if err != nil { - t.Fatalf("checking if /test dir exists in container: %v", err) - } - }, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.description, func(t *testing.T) { - ns, deleteNs := setupNamespace(t) - defer deleteNs() - - if testCase.setup != nil { - cleanupTC := testCase.setup(t) - defer cleanupTC(t) - } - - args := []string{} - args = append(args, testCase.args...) - args = append(args, "--namespace", ns.Name) - - cmd := exec.Command("skaffold", args...) 
- cmd.Dir = testCase.dir - go func() { - if output, err := util.RunCmdOut(cmd); err != nil { - logrus.Warnf("skaffold: %s %v", output, err) - } - }() - - for _, j := range testCase.jobs { - if err := kubernetesutil.WaitForJobToStabilize(context.Background(), client, ns.Name, j, 10*time.Minute); err != nil { - t.Fatalf("Timed out waiting for job to stabilize") - } - if testCase.jobValidation != nil { - job, err := client.BatchV1().Jobs(ns.Name).Get(j, meta_v1.GetOptions{}) - if err != nil { - t.Fatalf("Could not find job: %s %s", ns.Name, j) - } - testCase.jobValidation(t, ns, job) - } - } - - for _, p := range testCase.pods { - if err := kubernetesutil.WaitForPodReady(context.Background(), client.CoreV1().Pods(ns.Name), p); err != nil { - t.Fatalf("Timed out waiting for pod ready") - } - } - - if testCase.postSetup != nil { - cleanup := testCase.postSetup(t) - defer cleanup(t) - } - - if testCase.validation != nil { - testCase.validation(t, ns) - } - // No cleanup, since exiting skaffold dev should clean up automatically - }) - } -} - -func setupNamespace(t *testing.T) (*v1.Namespace, func()) { - ns, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ - ObjectMeta: meta_v1.ObjectMeta{ - GenerateName: "skaffold", - }, - }) - if err != nil { - t.Fatalf("creating namespace: %s", err) - } - - return ns, func() { - client.CoreV1().Namespaces().Delete(ns.Name, &meta_v1.DeleteOptions{}) - } -} - -func TestFix(t *testing.T) { - ns, deleteNs := setupNamespace(t) - defer deleteNs() - - fixCmd := exec.Command("skaffold", "fix", "-f", "skaffold.yaml") - fixCmd.Dir = "testdata" - out, err := util.RunCmdOut(fixCmd) - if err != nil { - t.Fatalf("testing error: %v", err) - } - - runCmd := exec.Command("skaffold", "run", "--namespace", ns.Name, "-f", "-") - runCmd.Dir = "testdata" - runCmd.Stdin = bytes.NewReader(out) - err = util.RunCmd(runCmd) - if err != nil { - t.Fatalf("testing error: %v", err) - } -} - -func TestListConfig(t *testing.T) { - baseConfig := &config.Config{ 
- Global: &config.ContextConfig{ - DefaultRepo: "global-repository", - }, - ContextConfigs: []*config.ContextConfig{ - { - Kubecontext: "test-context", - DefaultRepo: "context-local-repository", - }, - }, - } - - c, _ := yaml.Marshal(*baseConfig) - cfg, teardown := testutil.TempFile(t, "config", c) - defer teardown() - - type testListCase struct { - description string - kubectx string - expectedOutput []string - } - - var tests = []testListCase{ - { - description: "list for test-context", - kubectx: "test-context", - expectedOutput: []string{"default-repo: context-local-repository"}, - }, - { - description: "list all", - expectedOutput: []string{ - "global:", - "default-repo: global-repository", - "kube-context: test-context", - "default-repo: context-local-repository", - }, - }, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - args := []string{"config", "list", "-c", cfg} - if test.kubectx != "" { - args = append(args, "-k", test.kubectx) - } else { - args = append(args, "--all") - } - cmd := exec.Command("skaffold", args...) 
- rawOut, err := util.RunCmdOut(cmd) - if err != nil { - t.Error(err) - } - out := string(rawOut) - for _, output := range test.expectedOutput { - if !strings.Contains(out, output) { - t.Errorf("expected output %s not found in output: %s", output, out) - } - } - }) - } -} - -func TestInit(t *testing.T) { - type testCase struct { - name string - dir string - args []string - skipSkaffoldYaml bool - } - - tests := []testCase{ - { - name: "getting-started", - dir: "../examples/getting-started", - }, - { - name: "microservices", - dir: "../examples/microservices", - args: []string{ - "-a", "leeroy-app/Dockerfile=gcr.io/k8s-skaffold/leeroy-app", - "-a", "leeroy-web/Dockerfile=gcr.io/k8s-skaffold/leeroy-web", - }, - }, - { - name: "compose", - dir: "../examples/compose", - args: []string{"--compose-file", "docker-compose.yaml"}, - skipSkaffoldYaml: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if !test.skipSkaffoldYaml { - oldYamlPath := filepath.Join(test.dir, "skaffold.yaml") - oldYaml, err := removeOldSkaffoldYaml(oldYamlPath) - if err != nil { - t.Fatalf("removing original skaffold.yaml: %s", err) - } - defer restoreOldSkaffoldYaml(oldYaml, oldYamlPath) - } - - generatedYaml := "skaffold.yaml.out" - defer func() { - err := os.Remove(filepath.Join(test.dir, generatedYaml)) - if err != nil { - t.Errorf("error removing generated skaffold yaml: %v", err) - } - }() - initArgs := []string{"init", "--force", "-f", generatedYaml} - initArgs = append(initArgs, test.args...) - initCmd := exec.Command("skaffold", initArgs...) 
- initCmd.Dir = test.dir - - out, err := util.RunCmdOut(initCmd) - if err != nil { - t.Fatalf("running init: %v, output: %s", err, out) - } - - runCmd := exec.Command("skaffold", "run", "-f", generatedYaml) - runCmd.Dir = test.dir - out, err = util.RunCmdOut(runCmd) - if err != nil { - t.Fatalf("running skaffold on generated yaml: %v, output: %s", err, out) - } - }) - } -} - -func TestSetConfig(t *testing.T) { - baseConfig := &config.Config{ - Global: &config.ContextConfig{ - DefaultRepo: "global-repository", - }, - ContextConfigs: []*config.ContextConfig{ - { - Kubecontext: "test-context", - DefaultRepo: "context-local-repository", - }, - }, - } - - c, _ := yaml.Marshal(*baseConfig) - cfg, teardown := testutil.TempFile(t, "config", c) - defer teardown() - - type testSetCase struct { - description string - kubectx string - key string - shouldErr bool - } - - var tests = []testSetCase{ - { - description: "set default-repo for context", - kubectx: "test-context", - key: "default-repo", - }, - { - description: "set global default-repo", - key: "default-repo", - }, - { - description: "fail to set unrecognized value", - key: "doubt-this-will-ever-be-a-config-value", - shouldErr: true, - }, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - value := util.RandomID() - args := []string{"config", "set", test.key, value} - args = append(args, "-c", cfg) - if test.kubectx != "" { - args = append(args, "-k", test.kubectx) - } else { - args = append(args, "--global") - } - cmd := exec.Command("skaffold", args...) - if err := util.RunCmd(cmd); err != nil { - if test.shouldErr { - return - } - t.Error(err) - } - - listArgs := []string{"config", "list", "-c", cfg} - if test.kubectx != "" { - listArgs = append(listArgs, "-k", test.kubectx) - } else { - listArgs = append(listArgs, "--all") - } - listCmd := exec.Command("skaffold", listArgs...) 
- out, err := util.RunCmdOut(listCmd) - if err != nil { - t.Error(err) - } - t.Log(string(out)) - if !strings.Contains(string(out), fmt.Sprintf("%s: %s", test.key, value)) { - t.Errorf("value %s not set correctly", test.key) - } - }) - } -} - -func removeOldSkaffoldYaml(path string) ([]byte, error) { - skaffoldYaml, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - if err = os.Remove(path); err != nil { - return nil, err - } - return skaffoldYaml, nil -} - -func restoreOldSkaffoldYaml(contents []byte, path string) error { - f, err := os.Create(path) - if err != nil { - return err - } - if _, err := f.Write(contents); err != nil { - return err - } - return nil -} diff --git a/integration/sync_test.go b/integration/sync_test.go new file mode 100644 index 00000000000..57af1c6cfe6 --- /dev/null +++ b/integration/sync_test.go @@ -0,0 +1,56 @@ +// +build integration + +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "context" + "os/exec" + "testing" + "time" + + kubernetesutil "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "k8s.io/apimachinery/pkg/util/wait" +) + +func TestDevSync(t *testing.T) { + ns, deleteNs := SetupNamespace(t) + defer deleteNs() + + cancel := make(chan bool) + go RunSkaffoldNoFail(cancel, "dev", "examples/test-file-sync", ns.Name, "", nil) + defer func() { cancel <- true }() + + if err := kubernetesutil.WaitForPodReady(context.Background(), Client.CoreV1().Pods(ns.Name), "test-file-sync"); err != nil { + t.Fatalf("Timed out waiting for pod ready") + } + + Run(t, "examples/test-file-sync", "mkdir", "-p", "test") + Run(t, "examples/test-file-sync", "touch", "test/foobar") + defer Run(t, "examples/test-file-sync", "rm", "-rf", "test") + + err := wait.PollImmediate(time.Millisecond*500, 1*time.Minute, func() (bool, error) { + cmd := exec.Command("kubectl", "exec", "test-file-sync", "-n", ns.Name, "--", "ls", "/test") + _, err := util.RunCmdOut(cmd) + return err == nil, nil + }) + if err != nil { + t.Fatalf("checking if /test dir exists in container: %v", err) + } +} diff --git a/integration/testdata/build/Dockerfile b/integration/testdata/build/Dockerfile new file mode 100644 index 00000000000..4652feec056 --- /dev/null +++ b/integration/testdata/build/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +COPY . 
/data diff --git a/integration/testdata/build/multi-env/Dockerfile b/integration/testdata/build/multi-env/Dockerfile new file mode 100644 index 00000000000..cfa889d4a9c --- /dev/null +++ b/integration/testdata/build/multi-env/Dockerfile @@ -0,0 +1,7 @@ +FROM busybox + +ENV file1=file1 \ + file2=file2 + +COPY $file1 $file2 /data/ +RUN [ "$(find /data -type f | wc -l | xargs)" == "2" ] \ No newline at end of file diff --git a/integration/testdata/build/multi-env/file1 b/integration/testdata/build/multi-env/file1 new file mode 100644 index 00000000000..08219db9b09 --- /dev/null +++ b/integration/testdata/build/multi-env/file1 @@ -0,0 +1 @@ +file1 \ No newline at end of file diff --git a/integration/testdata/build/multi-env/file2 b/integration/testdata/build/multi-env/file2 new file mode 100644 index 00000000000..30d67d4672d --- /dev/null +++ b/integration/testdata/build/multi-env/file2 @@ -0,0 +1 @@ +file2 \ No newline at end of file diff --git a/integration/testdata/build/skaffold.yaml b/integration/testdata/build/skaffold.yaml new file mode 100644 index 00000000000..e4d7473807d --- /dev/null +++ b/integration/testdata/build/skaffold.yaml @@ -0,0 +1,35 @@ +apiVersion: skaffold/v1beta5 +kind: Config +build: + local: + push: false + artifacts: + # A simple Docker build + - image: simple-build + + # Building from a sub-directory + - image: sub-directory + context: sub-directory + + # Testing multiline env variables in Dockerfiles + # Would have caught #1624 + - image: multi-env + context: multi-env + + # Testing Dockerfiles with targets + - image: targets + context: targets + + # Providing a target + # Would have caught #1605 + - image: target1 + context: targets + docker: + target: target1 + + # Providing another target + # Would have caught #1605 + - image: target2 + context: targets + docker: + target: target2 diff --git a/integration/testdata/build/sub-directory/Dockerfile b/integration/testdata/build/sub-directory/Dockerfile new file mode 100644 index 
00000000000..3830aaed78e --- /dev/null +++ b/integration/testdata/build/sub-directory/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox + +COPY file1 file2 /data/ +RUN [ "$(find /data -type f | wc -l | xargs)" == "2" ] \ No newline at end of file diff --git a/integration/testdata/build/sub-directory/file1 b/integration/testdata/build/sub-directory/file1 new file mode 100644 index 00000000000..08219db9b09 --- /dev/null +++ b/integration/testdata/build/sub-directory/file1 @@ -0,0 +1 @@ +file1 \ No newline at end of file diff --git a/integration/testdata/build/sub-directory/file2 b/integration/testdata/build/sub-directory/file2 new file mode 100644 index 00000000000..30d67d4672d --- /dev/null +++ b/integration/testdata/build/sub-directory/file2 @@ -0,0 +1 @@ +file2 \ No newline at end of file diff --git a/integration/testdata/build/targets/Dockerfile b/integration/testdata/build/targets/Dockerfile new file mode 100644 index 00000000000..9440d8c8673 --- /dev/null +++ b/integration/testdata/build/targets/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox as target1 + +COPY file1 file2 /data/ +RUN [ "$(find /data -type f | wc -l | xargs)" == "2" ] + +FROM busybox as target2 + +COPY file3 /data/ +RUN [ "$(find /data -type f | wc -l | xargs)" == "1" ] \ No newline at end of file diff --git a/integration/testdata/build/targets/file1 b/integration/testdata/build/targets/file1 new file mode 100644 index 00000000000..08219db9b09 --- /dev/null +++ b/integration/testdata/build/targets/file1 @@ -0,0 +1 @@ +file1 \ No newline at end of file diff --git a/integration/testdata/build/targets/file2 b/integration/testdata/build/targets/file2 new file mode 100644 index 00000000000..30d67d4672d --- /dev/null +++ b/integration/testdata/build/targets/file2 @@ -0,0 +1 @@ +file2 \ No newline at end of file diff --git a/integration/testdata/build/targets/file3 b/integration/testdata/build/targets/file3 new file mode 100644 index 00000000000..873fb8d667d --- /dev/null +++ 
b/integration/testdata/build/targets/file3 @@ -0,0 +1 @@ +file3 \ No newline at end of file diff --git a/integration/testdata/Dockerfile b/integration/testdata/fix/Dockerfile similarity index 100% rename from integration/testdata/Dockerfile rename to integration/testdata/fix/Dockerfile diff --git a/integration/testdata/k8s-pod.yaml b/integration/testdata/fix/k8s-pod.yaml similarity index 100% rename from integration/testdata/k8s-pod.yaml rename to integration/testdata/fix/k8s-pod.yaml diff --git a/integration/testdata/main.go b/integration/testdata/fix/main.go similarity index 100% rename from integration/testdata/main.go rename to integration/testdata/fix/main.go diff --git a/integration/testdata/skaffold.yaml b/integration/testdata/fix/skaffold.yaml similarity index 100% rename from integration/testdata/skaffold.yaml rename to integration/testdata/fix/skaffold.yaml diff --git a/integration/testdata/gcb-sub-folder/k8s/pod.yaml b/integration/testdata/gcb-sub-folder/k8s/pod.yaml new file mode 100644 index 00000000000..869f33a9c34 --- /dev/null +++ b/integration/testdata/gcb-sub-folder/k8s/pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: getting-started +spec: + containers: + - name: getting-started + image: gcr.io/k8s-skaffold/skaffold-example-sub diff --git a/integration/testdata/gcb-sub-folder/skaffold.yaml b/integration/testdata/gcb-sub-folder/skaffold.yaml new file mode 100644 index 00000000000..f0ae43f8b32 --- /dev/null +++ b/integration/testdata/gcb-sub-folder/skaffold.yaml @@ -0,0 +1,7 @@ +apiVersion: skaffold/v1beta4 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example-sub + context: sub + googleCloudBuild: {} diff --git a/integration/testdata/gcb-sub-folder/sub/Dockerfile b/integration/testdata/gcb-sub-folder/sub/Dockerfile new file mode 100644 index 00000000000..184d6cce3e3 --- /dev/null +++ b/integration/testdata/gcb-sub-folder/sub/Dockerfile @@ -0,0 +1,7 @@ +FROM golang:1.10.1-alpine3.7 as builder +COPY 
main.go . +RUN go build -o /app main.go + +FROM alpine:3.7 +CMD ["./app"] +COPY --from=builder /app . diff --git a/integration/testdata/gcb-sub-folder/sub/main.go b/integration/testdata/gcb-sub-folder/sub/main.go new file mode 100644 index 00000000000..593721cfe2e --- /dev/null +++ b/integration/testdata/gcb-sub-folder/sub/main.go @@ -0,0 +1,14 @@ +package main + +import ( + "fmt" + "time" +) + +func main() { + for { + fmt.Println("Hello world!") + + time.Sleep(time.Second * 1) + } +} diff --git a/integration/testdata/kaniko-sub-folder/k8s/pod.yaml b/integration/testdata/kaniko-sub-folder/k8s/pod.yaml new file mode 100644 index 00000000000..79591bacaa6 --- /dev/null +++ b/integration/testdata/kaniko-sub-folder/k8s/pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: getting-started-kaniko +spec: + containers: + - name: getting-started + image: gcr.io/k8s-skaffold/skaffold-example-sub diff --git a/integration/testdata/kaniko-sub-folder/skaffold.yaml b/integration/testdata/kaniko-sub-folder/skaffold.yaml new file mode 100644 index 00000000000..fbf1900e4a4 --- /dev/null +++ b/integration/testdata/kaniko-sub-folder/skaffold.yaml @@ -0,0 +1,10 @@ +apiVersion: skaffold/v1beta4 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example-sub + context: sub + kaniko: + buildContext: + localDir: {} + pullSecretName: e2esecret diff --git a/integration/testdata/kaniko-sub-folder/sub/Dockerfile b/integration/testdata/kaniko-sub-folder/sub/Dockerfile new file mode 100644 index 00000000000..2d107918ad0 --- /dev/null +++ b/integration/testdata/kaniko-sub-folder/sub/Dockerfile @@ -0,0 +1,6 @@ +FROM gcr.io/google-appengine/golang + +WORKDIR /go/src/github.com/GoogleCloudPlatform/skaffold +CMD ["./app"] +COPY main.go . 
+RUN go build -o app main.go diff --git a/integration/testdata/kaniko-sub-folder/sub/main.go b/integration/testdata/kaniko-sub-folder/sub/main.go new file mode 100644 index 00000000000..64b7bdfc4a1 --- /dev/null +++ b/integration/testdata/kaniko-sub-folder/sub/main.go @@ -0,0 +1,13 @@ +package main + +import ( + "fmt" + "time" +) + +func main() { + for { + fmt.Println("Hello world!") + time.Sleep(time.Second * 1) + } +} diff --git a/integration/testdata/tagPolicy/Dockerfile b/integration/testdata/tagPolicy/Dockerfile new file mode 100644 index 00000000000..24a79d08b63 --- /dev/null +++ b/integration/testdata/tagPolicy/Dockerfile @@ -0,0 +1 @@ +FROM busybox diff --git a/integration/testdata/tagPolicy/skaffold.yaml b/integration/testdata/tagPolicy/skaffold.yaml new file mode 100644 index 00000000000..3fb8b82c4e7 --- /dev/null +++ b/integration/testdata/tagPolicy/skaffold.yaml @@ -0,0 +1,28 @@ +apiVersion: skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: simple-build + local: + push: false + +profiles: +- name: gitCommit + build: + tagPolicy: + gitCommit: {} +- name: sha256 + build: + tagPolicy: + sha256: {} +- name: dateTime + build: + tagPolicy: + dateTime: + format: "2006-01-02" + timezone: "UTC" +- name: envTemplate + build: + tagPolicy: + envTemplate: + template: "{{.IMAGE_NAME}}:tag" diff --git a/integration/util.go b/integration/util.go new file mode 100644 index 00000000000..d47bd2b2c59 --- /dev/null +++ b/integration/util.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "os" + "os/exec" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +var Client kubernetes.Interface + +func RunSkaffold(t *testing.T, command, dir, namespace, filename string, env []string, additionalArgs ...string) { + if err := RunSkaffoldNoFail(make(chan bool), command, dir, namespace, filename, env, additionalArgs...); err != nil { + t.Fatalf("skaffold delete: %v", err) + } +} + +func RunSkaffoldNoFail(cancel chan bool, command, dir, namespace, filename string, env []string, additionalArgs ...string) error { + args := []string{command, "--namespace", namespace} + if filename != "" { + args = append(args, "-f", filename) + } + args = append(args, additionalArgs...) + + cmd := exec.Command("skaffold", args...) + cmd.Dir = dir + cmd.Env = append(os.Environ(), env...) + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + + cmd.Start() + + result := make(chan error) + go func() { + err := cmd.Wait() + result <- err + }() + + select { + case err := <-result: + return err + case <-cancel: + return cmd.Process.Kill() + } +} + +func Run(t *testing.T, dir, command string, args ...string) { + cmd := exec.Command(command, args...) 
+ cmd.Dir = dir + if output, err := util.RunCmdOut(cmd); err != nil { + t.Fatalf("running command [%s %v]: %s %v", command, args, output, err) + } +} + +func SetupNamespace(t *testing.T) (*v1.Namespace, func()) { + ns, err := Client.CoreV1().Namespaces().Create(&v1.Namespace{ + ObjectMeta: meta_v1.ObjectMeta{ + GenerateName: "skaffold", + }, + }) + if err != nil { + t.Fatalf("creating namespace: %s", err) + } + + return ns, func() { + Client.CoreV1().Namespaces().Delete(ns.Name, &meta_v1.DeleteOptions{}) + } +} diff --git a/pkg/skaffold/apiversion/apiversion.go b/pkg/skaffold/apiversion/apiversion.go index ada0c7626bc..b9326e3e97d 100644 --- a/pkg/skaffold/apiversion/apiversion.go +++ b/pkg/skaffold/apiversion/apiversion.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/apiversion/apiversion_test.go b/pkg/skaffold/apiversion/apiversion_test.go index 4d7cb632a65..05ca2a370f8 100644 --- a/pkg/skaffold/apiversion/apiversion_test.go +++ b/pkg/skaffold/apiversion/apiversion_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/bazel/bazel.go b/pkg/skaffold/bazel/bazel.go index 7f209b819e9..3bd9c7a3075 100644 --- a/pkg/skaffold/bazel/bazel.go +++ b/pkg/skaffold/bazel/bazel.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ import ( "os/exec" "path/filepath" "strings" + "time" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" @@ -39,6 +40,14 @@ func query(target string) string { // GetDependencies finds the sources dependencies for the given bazel artifact. // All paths are relative to the workspace. func GetDependencies(ctx context.Context, workspace string, a *latest.BazelArtifact) ([]string, error) { + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + go func() { + <-timer.C + logrus.Warnln("Retrieving Bazel dependencies can take a long time the first time") + }() + cmd := exec.CommandContext(ctx, "bazel", "query", query(a.BuildTarget), "--noimplicit_deps", "--order_output=no") cmd.Dir = workspace stdout, err := util.RunCmdOut(cmd) diff --git a/pkg/skaffold/bazel/bazel_test.go b/pkg/skaffold/bazel/bazel_test.go index f0ebca5d4a5..ce9ee9b4891 100644 --- a/pkg/skaffold/bazel/bazel_test.go +++ b/pkg/skaffold/bazel/bazel_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/build/bazel/bazel.go b/pkg/skaffold/build/bazel/bazel.go new file mode 100644 index 00000000000..074c8cbbc1d --- /dev/null +++ b/pkg/skaffold/build/bazel/bazel.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bazel + +import ( + "context" + "io" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/bazel" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/local" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + kubectx "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// Builder builds artifacts with Bazel. +type Builder struct { + opts *config.SkaffoldOptions + env *latest.ExecutionEnvironment +} + +// NewBuilder creates a new Builder that builds artifacts with Bazel. +func NewBuilder() *Builder { + return &Builder{} +} + +// Init stores skaffold options and the execution environment +func (b *Builder) Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) { + b.opts = opts + b.env = env +} + +// Labels are labels specific to Bazel. 
+func (b *Builder) Labels() map[string]string { + return map[string]string{ + constants.Labels.Builder: "bazel", + } +} + +// DependenciesForArtifact returns the dependencies for this bazel artifact +func (b *Builder) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + if err := setArtifact(artifact); err != nil { + return nil, err + } + if artifact.BazelArtifact == nil { + return nil, errors.New("bazel artifact is nil") + } + paths, err := bazel.GetDependencies(ctx, artifact.Workspace, artifact.BazelArtifact) + if err != nil { + return nil, errors.Wrap(err, "getting bazel dependencies") + } + return util.AbsolutePaths(artifact.Workspace, paths), nil +} + +// Build is responsible for building artifacts in their respective execution environments +// The builder plugin is also responsible for setting any necessary defaults +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + switch b.env.Name { + case constants.Local: + return b.local(ctx, out, tags, artifacts) + default: + return nil, errors.Errorf("%s is not a supported environment for builder bazel", b.env.Name) + } +} + +// local sets any necessary defaults and then builds artifacts with bazel locally +func (b *Builder) local(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + var l *latest.LocalBuild + if err := util.CloneThroughJSON(b.env.Properties, &l); err != nil { + return nil, errors.Wrap(err, "converting execution env to localBuild struct") + } + if l == nil { + l = &latest.LocalBuild{} + } + kubeContext, err := kubectx.CurrentContext() + if err != nil { + return nil, errors.Wrap(err, "getting current cluster context") + } + builder, err := local.NewBuilder(l, kubeContext, b.opts.SkipTests) + if err != nil { + return nil, errors.Wrap(err, "getting local builder") + } + for _, a := range artifacts { + if err := 
setArtifact(a); err != nil { + return nil, errors.Wrapf(err, "setting artifact %s", a.ImageName) + } + } + return builder.Build(ctx, out, tags, artifacts) +} + +func setArtifact(artifact *latest.Artifact) error { + if artifact.ArtifactType.BazelArtifact != nil { + return nil + } + var a *latest.BazelArtifact + if err := yaml.UnmarshalStrict(artifact.BuilderPlugin.Contents, &a); err != nil { + return errors.Wrap(err, "unmarshalling bazel artifact") + } + if a == nil { + return errors.New("artifact is nil") + } + if a.BuildTarget == "" { + return errors.Errorf("%s must have an associated build target", artifact.ImageName) + } + artifact.ArtifactType.BazelArtifact = a + return nil +} diff --git a/pkg/skaffold/build/bazel/bazel_test.go b/pkg/skaffold/build/bazel/bazel_test.go new file mode 100644 index 00000000000..896577376e5 --- /dev/null +++ b/pkg/skaffold/build/bazel/bazel_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bazel + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestSetArtifact(t *testing.T) { + tests := []struct { + name string + initial *latest.Artifact + expected *latest.Artifact + shouldErr bool + }{ + { + name: "set target correctly", + initial: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte("target: myTarget"), + }, + }, + expected: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte("target: myTarget"), + }, + ArtifactType: latest.ArtifactType{ + BazelArtifact: &latest.BazelArtifact{ + BuildTarget: "myTarget", + }, + }, + }, + }, + { + name: "set target and build args correctly", + initial: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte(`target: myTarget +args: + - arg1=arg1 + - arg2=arg2`)}, + }, + expected: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte(`target: myTarget +args: + - arg1=arg1 + - arg2=arg2`)}, + ArtifactType: latest.ArtifactType{ + BazelArtifact: &latest.BazelArtifact{ + BuildTarget: "myTarget", + BuildArgs: []string{ + "arg1=arg1", + "arg2=arg2", + }, + }, + }, + }, + }, + { + name: "no target", + initial: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte(`args: + - arg=arg`)}, + }, + shouldErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setArtifact(test.initial) + if test.shouldErr { + testutil.CheckError(t, test.shouldErr, err) + return + } + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, test.initial) + }) + } +} diff --git a/pkg/skaffold/build/build.go b/pkg/skaffold/build/build.go index e9e2d9473f3..7f400f07473 100644 --- a/pkg/skaffold/build/build.go +++ b/pkg/skaffold/build/build.go @@ 
-1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,5 +37,7 @@ type Artifact struct { type Builder interface { Labels() map[string]string - Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]Artifact, error) + Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]Artifact, error) + + DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) } diff --git a/pkg/skaffold/build/docker/docker.go b/pkg/skaffold/build/docker/docker.go new file mode 100644 index 00000000000..c75c2e1d193 --- /dev/null +++ b/pkg/skaffold/build/docker/docker.go @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docker + +import ( + "context" + "io" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/environments/gcb" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/defaults" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// Builder builds artifacts with Docker. +type Builder struct { + opts *config.SkaffoldOptions + env *latest.ExecutionEnvironment +} + +// NewBuilder creates a new Builder that builds artifacts with Docker. +func NewBuilder() *Builder { + return &Builder{} +} + +// Init stores skaffold options and the execution environment +func (b *Builder) Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) { + b.opts = opts + b.env = env +} + +// Labels are labels specific to Docker. 
+func (b *Builder) Labels() map[string]string { + return map[string]string{ + constants.Labels.Builder: "docker", + } +} + +// DependenciesForArtifact returns the dependencies for this docker artifact +func (b *Builder) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + if err := setArtifact(artifact); err != nil { + return nil, err + } + paths, err := docker.GetDependencies(ctx, artifact.Workspace, artifact.DockerArtifact) + if err != nil { + return nil, errors.Wrapf(err, "getting dependencies for %s", artifact.ImageName) + } + return util.AbsolutePaths(artifact.Workspace, paths), nil +} + +// Build is responsible for building artifacts in their respective execution environments +// The builder plugin is also responsible for setting any necessary defaults +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + switch b.env.Name { + case constants.GoogleCloudBuild: + return b.googleCloudBuild(ctx, out, tags, artifacts) + default: + return nil, errors.Errorf("%s is not a supported environment for builder docker", b.env.Name) + } +} + +// googleCloudBuild sets any necessary defaults and then builds artifacts with docker in GCB +func (b *Builder) googleCloudBuild(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + var g *latest.GoogleCloudBuild + if err := util.CloneThroughJSON(b.env.Properties, &g); err != nil { + return nil, errors.Wrap(err, "converting execution environment to googleCloudBuild struct") + } + defaults.SetDefaultCloudBuildDockerImage(g) + for _, a := range artifacts { + if err := setArtifact(a); err != nil { + return nil, err + } + } + return gcb.NewBuilder(g, b.opts.SkipTests).Build(ctx, out, tags, artifacts) +} + +func setArtifact(artifact *latest.Artifact) error { + if artifact.ArtifactType.DockerArtifact != nil { + return nil + } + var a 
*latest.DockerArtifact + if err := yaml.UnmarshalStrict(artifact.BuilderPlugin.Contents, &a); err != nil { + return errors.Wrap(err, "unmarshalling docker artifact") + } + if a == nil { + a = &latest.DockerArtifact{} + } + defaults.SetDefaultDockerArtifact(a) + artifact.ArtifactType.DockerArtifact = a + return nil +} diff --git a/pkg/skaffold/build/docker/docker_test.go b/pkg/skaffold/build/docker/docker_test.go new file mode 100644 index 00000000000..3cd44056885 --- /dev/null +++ b/pkg/skaffold/build/docker/docker_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docker + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestSetArtifact(t *testing.T) { + tests := []struct { + name string + initial *latest.Artifact + expected *latest.Artifact + }{ + { + name: "no contents passed in", + initial: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{}, + }, + expected: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{}, + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + DockerfilePath: "Dockerfile", + }, + }, + }, + }, + { + name: "set dockerfile path", + initial: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte("dockerfile: path/to/Dockerfile"), + }, + }, + expected: &latest.Artifact{ + ImageName: "image", + BuilderPlugin: &latest.BuilderPlugin{ + Contents: []byte("dockerfile: path/to/Dockerfile"), + }, + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + DockerfilePath: "path/to/Dockerfile", + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setArtifact(test.initial) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expected, test.initial) + }) + } +} diff --git a/pkg/skaffold/build/gcb/desc.go b/pkg/skaffold/build/gcb/desc.go deleted file mode 100644 index 1858ed4177d..00000000000 --- a/pkg/skaffold/build/gcb/desc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 The Skaffold Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcb - -import ( - "errors" - "fmt" - - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" - cloudbuild "google.golang.org/api/cloudbuild/v1" -) - -func (b *Builder) buildDescription(artifact *latest.Artifact, bucket, object string) (*cloudbuild.Build, error) { - steps, err := b.buildSteps(artifact) - if err != nil { - return nil, err - } - - return &cloudbuild.Build{ - LogsBucket: bucket, - Source: &cloudbuild.Source{ - StorageSource: &cloudbuild.StorageSource{ - Bucket: bucket, - Object: object, - }, - }, - Steps: steps, - Images: []string{artifact.ImageName}, - Options: &cloudbuild.BuildOptions{ - DiskSizeGb: b.DiskSizeGb, - MachineType: b.MachineType, - }, - Timeout: b.Timeout, - }, nil -} - -func (b *Builder) buildSteps(artifact *latest.Artifact) ([]*cloudbuild.BuildStep, error) { - switch { - case artifact.DockerArtifact != nil: - return b.dockerBuildSteps(artifact.ImageName, artifact.DockerArtifact), nil - - case artifact.BazelArtifact != nil: - return nil, errors.New("skaffold can't build a bazel artifact with Google Cloud Build") - - case artifact.JibMavenArtifact != nil: - return nil, errors.New("skaffold can't build a jib maven artifact with Google Cloud Build") - - case artifact.JibGradleArtifact != nil: - return nil, errors.New("skaffold can't build a jib gradle artifact with Google Cloud Build") - - default: - return nil, fmt.Errorf("undefined artifact type: %+v", artifact.ArtifactType) - } -} diff --git a/pkg/skaffold/build/kaniko/kaniko.go b/pkg/skaffold/build/kaniko/kaniko.go index 927106d906b..8ff7b128485 100644 --- 
a/pkg/skaffold/build/kaniko/kaniko.go +++ b/pkg/skaffold/build/kaniko/kaniko.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,44 +22,34 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/pkg/errors" ) // Build builds a list of artifacts with Kaniko. -func (b *Builder) Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]build.Artifact, error) { - teardown, err := b.setupSecret(out) +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + teardownPullSecret, err := b.setupPullSecret(out) if err != nil { - return nil, errors.Wrap(err, "setting up secret") + return nil, errors.Wrap(err, "setting up pull secret") + } + defer teardownPullSecret() + + if b.DockerConfig != nil { + teardownDockerConfigSecret, err := b.setupDockerConfigSecret(out) + if err != nil { + return nil, errors.Wrap(err, "setting up docker config secret") + } + defer teardownDockerConfigSecret() } - defer teardown() - return build.InParallel(ctx, out, tagger, artifacts, b.buildArtifactWithKaniko) + return build.InParallel(ctx, out, tags, artifacts, b.buildArtifactWithKaniko) } -func (b *Builder) buildArtifactWithKaniko(ctx context.Context, out io.Writer, tagger tag.Tagger, artifact *latest.Artifact) (string, error) { - initialTag, err := b.run(ctx, out, artifact) +func (b *Builder) buildArtifactWithKaniko(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + digest, err := b.run(ctx, out, artifact, tag) if err != nil { return 
"", errors.Wrapf(err, "kaniko build for [%s]", artifact.ImageName) } - digest, err := docker.RemoteDigest(initialTag) - if err != nil { - return "", errors.Wrap(err, "getting digest") - } - - tag, err := tagger.GenerateFullyQualifiedImageName(artifact.Workspace, tag.Options{ - ImageName: artifact.ImageName, - Digest: digest, - }) - if err != nil { - return "", errors.Wrap(err, "generating tag") - } - - if err := docker.AddTag(initialTag, tag); err != nil { - return "", errors.Wrap(err, "tagging image") - } - - return tag, nil + return tag + "@" + digest, nil } diff --git a/pkg/skaffold/build/kaniko/logs.go b/pkg/skaffold/build/kaniko/logs.go index fcea2a33a81..909d2470965 100644 --- a/pkg/skaffold/build/kaniko/logs.go +++ b/pkg/skaffold/build/kaniko/logs.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/build/kaniko/logs_test.go b/pkg/skaffold/build/kaniko/logs_test.go index 2de52c256ce..fd6abab9bbe 100644 --- a/pkg/skaffold/build/kaniko/logs_test.go +++ b/pkg/skaffold/build/kaniko/logs_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/build/kaniko/run.go b/pkg/skaffold/build/kaniko/run.go index 6043fecbfab..69feb27b2b9 100644 --- a/pkg/skaffold/build/kaniko/run.go +++ b/pkg/skaffold/build/kaniko/run.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -31,13 +31,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (b *Builder) run(ctx context.Context, out io.Writer, artifact *latest.Artifact) (string, error) { - initialTag := util.RandomID() - imageDst := fmt.Sprintf("%s:%s", artifact.ImageName, initialTag) +func (b *Builder) run(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + if artifact.DockerArtifact == nil { + return "", errors.New("kaniko builder supports only Docker artifacts") + } // Prepare context s := sources.Retrieve(b.KanikoBuild) - context, err := s.Setup(ctx, out, artifact, initialTag) + dependencies, err := b.DependenciesForArtifact(ctx, artifact) + if err != nil { + return "", errors.Wrapf(err, "getting dependencies for %s", artifact.ImageName) + } + context, err := s.Setup(ctx, out, artifact, util.RandomID(), dependencies) if err != nil { return "", errors.Wrap(err, "setting up build context") } @@ -47,7 +52,7 @@ func (b *Builder) run(ctx context.Context, out io.Writer, artifact *latest.Artif args := []string{ "--dockerfile", artifact.DockerArtifact.DockerfilePath, "--context", context, - "--destination", imageDst, + "--destination", tag, "-v", logLevel().String()} args = append(args, b.AdditionalFlags...) args = append(args, docker.GetBuildArgs(artifact.DockerArtifact)...) @@ -92,5 +97,5 @@ func (b *Builder) run(ctx context.Context, out io.Writer, artifact *latest.Artif waitForLogs() - return imageDst, nil + return docker.RemoteDigest(tag) } diff --git a/pkg/skaffold/build/kaniko/secret.go b/pkg/skaffold/build/kaniko/secret.go index 9c4e6a5086b..476cbdd5eb0 100644 --- a/pkg/skaffold/build/kaniko/secret.go +++ b/pkg/skaffold/build/kaniko/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (b *Builder) setupSecret(out io.Writer) (func(), error) { +func (b *Builder) setupPullSecret(out io.Writer) (func(), error) { color.Default.Fprintf(out, "Creating kaniko secret [%s]...\n", b.PullSecretName) client, err := kubernetes.GetClientset() @@ -51,7 +51,7 @@ func (b *Builder) setupSecret(out io.Writer) (func(), error) { secretData, err := ioutil.ReadFile(b.PullSecret) if err != nil { - return nil, errors.Wrap(err, "reading secret") + return nil, errors.Wrap(err, "reading pull secret") } secret := &v1.Secret{ @@ -65,12 +65,62 @@ func (b *Builder) setupSecret(out io.Writer) (func(), error) { } if _, err := secrets.Create(secret); err != nil { - return nil, errors.Wrapf(err, "creating secret: %s", err) + return nil, errors.Wrapf(err, "creating pull secret: %s", err) } return func() { if err := secrets.Delete(b.PullSecretName, &metav1.DeleteOptions{}); err != nil { - logrus.Warnf("deleting secret") + logrus.Warnf("deleting pull secret") + } + }, nil +} + +func (b *Builder) setupDockerConfigSecret(out io.Writer) (func(), error) { + if b.DockerConfig == nil { + return func() {}, nil + } + + color.Default.Fprintf(out, "Creating docker config secret [%s]...\n", b.DockerConfig.SecretName) + + client, err := kubernetes.GetClientset() + if err != nil { + return nil, errors.Wrap(err, "getting kubernetes client") + } + + secrets := client.CoreV1().Secrets(b.Namespace) + + if b.DockerConfig.Path == "" { + logrus.Debug("No docker config specified. 
Checking for one in the cluster.") + + if _, err := secrets.Get(b.DockerConfig.SecretName, metav1.GetOptions{}); err != nil { + return nil, errors.Wrap(err, "checking for existing kaniko secret") + } + + return func() {}, nil + } + + secretData, err := ioutil.ReadFile(b.DockerConfig.Path) + if err != nil { + return nil, errors.Wrap(err, "reading docker config") + } + + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.DockerConfig.SecretName, + Labels: map[string]string{"skaffold-kaniko": "skaffold-kaniko"}, + }, + Data: map[string][]byte{ + "config.json": secretData, + }, + } + + if _, err := secrets.Create(secret); err != nil { + return nil, errors.Wrapf(err, "creating docker config secret: %s", err) + } + + return func() { + if err := secrets.Delete(b.DockerConfig.SecretName, &metav1.DeleteOptions{}); err != nil { + logrus.Warnf("deleting docker config secret") } }, nil } diff --git a/pkg/skaffold/build/kaniko/sources/gcs.go b/pkg/skaffold/build/kaniko/sources/gcs.go index 082d70f8465..3ed51b6be8c 100644 --- a/pkg/skaffold/build/kaniko/sources/gcs.go +++ b/pkg/skaffold/build/kaniko/sources/gcs.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,9 +23,9 @@ import ( cstorage "cloud.google.com/go/storage" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/gcp" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sources" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" ) @@ -36,7 +36,7 @@ type GCSBucket struct { } // Setup uploads the context to the provided GCS bucket -func (g *GCSBucket) Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string) (string, error) { +func (g *GCSBucket) Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string, dependencies []string) (string, error) { bucket := g.cfg.BuildContext.GCSBucket if bucket == "" { guessedProjectID, err := gcp.ExtractProjectID(artifact.ImageName) @@ -50,7 +50,7 @@ func (g *GCSBucket) Setup(ctx context.Context, out io.Writer, artifact *latest.A color.Default.Fprintln(out, "Uploading sources to", bucket, "GCS bucket") g.tarName = fmt.Sprintf("context-%s.tar.gz", initialTag) - if err := docker.UploadContextToGCS(ctx, artifact.Workspace, artifact.DockerArtifact, bucket, g.tarName); err != nil { + if err := sources.UploadToGCS(ctx, artifact, bucket, g.tarName, dependencies); err != nil { return "", errors.Wrap(err, "uploading sources to GCS") } diff --git a/pkg/skaffold/build/kaniko/sources/localdir.go b/pkg/skaffold/build/kaniko/sources/localdir.go index c6b1d1a27b2..bde54bee1b9 100644 --- a/pkg/skaffold/build/kaniko/sources/localdir.go +++ b/pkg/skaffold/build/kaniko/sources/localdir.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,9 +29,9 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sources" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" ) @@ -47,7 +47,7 @@ type LocalDir struct { } // Setup for LocalDir creates a tarball of the buildcontext and stores it in /tmp -func (g *LocalDir) Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string) (string, error) { +func (g *LocalDir) Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string, dependencies []string) (string, error) { g.tarPath = filepath.Join(os.TempDir(), fmt.Sprintf("context-%s.tar.gz", initialTag)) color.Default.Fprintln(out, "Storing build context at", g.tarPath) @@ -57,7 +57,7 @@ func (g *LocalDir) Setup(ctx context.Context, out io.Writer, artifact *latest.Ar } defer f.Close() - err = docker.CreateDockerTarGzContext(ctx, f, artifact.Workspace, artifact.DockerArtifact) + err = sources.TarGz(ctx, f, artifact, dependencies) context := fmt.Sprintf("dir://%s", constants.DefaultKanikoEmptyDirMountPath) return context, err diff --git a/pkg/skaffold/build/kaniko/sources/sources.go b/pkg/skaffold/build/kaniko/sources/sources.go index 958607a08da..230cbdc6937 100644 --- a/pkg/skaffold/build/kaniko/sources/sources.go +++ b/pkg/skaffold/build/kaniko/sources/sources.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -28,7 +28,7 @@ import ( // BuildContextSource is the generic type for the different build context sources the kaniko builder can use type BuildContextSource interface { - Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string) (string, error) + Setup(ctx context.Context, out io.Writer, artifact *latest.Artifact, initialTag string, dependencies []string) (string, error) Pod(args []string) *v1.Pod ModifyPod(ctx context.Context, p *v1.Pod) error Cleanup(ctx context.Context) error @@ -48,7 +48,7 @@ func Retrieve(cfg *latest.KanikoBuild) BuildContextSource { } func podTemplate(cfg *latest.KanikoBuild, args []string) *v1.Pod { - return &v1.Pod{ + pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "kaniko-", Labels: map[string]string{"skaffold-kaniko": "skaffold-kaniko"}, @@ -58,7 +58,7 @@ func podTemplate(cfg *latest.KanikoBuild, args []string) *v1.Pod { Containers: []v1.Container{ { Name: constants.DefaultKanikoContainerName, - Image: constants.DefaultKanikoImage, + Image: cfg.Image, Args: args, ImagePullPolicy: v1.PullIfNotPresent, Env: []v1.EnvVar{{ @@ -81,7 +81,32 @@ func podTemplate(cfg *latest.KanikoBuild, args []string) *v1.Pod { SecretName: cfg.PullSecretName, }, }, - }}, + }, + }, }, } + + if cfg.DockerConfig == nil { + return pod + } + + volumeMount := v1.VolumeMount{ + Name: constants.DefaultKanikoDockerConfigSecretName, + MountPath: constants.DefaultKanikoDockerConfigPath, + } + + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, volumeMount) + + volume := v1.Volume{ + Name: constants.DefaultKanikoDockerConfigSecretName, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: cfg.DockerConfig.SecretName, + }, + }, + } + + pod.Spec.Volumes = append(pod.Spec.Volumes, volume) + + return pod } diff --git a/pkg/skaffold/build/kaniko/types.go b/pkg/skaffold/build/kaniko/types.go index d40fc1baf71..c3d010fa2f4 100644 --- a/pkg/skaffold/build/kaniko/types.go +++ 
b/pkg/skaffold/build/kaniko/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,13 @@ limitations under the License. package kaniko import ( + "context" "time" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/pkg/errors" ) @@ -50,3 +53,12 @@ func (b *Builder) Labels() map[string]string { constants.Labels.Builder: "kaniko", } } + +// DependenciesForArtifact returns the Dockerfile dependencies for this kaniko artifact +func (b *Builder) DependenciesForArtifact(ctx context.Context, a *latest.Artifact) ([]string, error) { + paths, err := docker.GetDependencies(ctx, a.Workspace, a.DockerArtifact) + if err != nil { + return nil, errors.Wrapf(err, "getting dependencies for %s", a.ImageName) + } + return util.AbsolutePaths(a.Workspace, paths), nil +} diff --git a/pkg/skaffold/build/local/bazel.go b/pkg/skaffold/build/local/bazel.go index 47ee8dc093d..62b523568dd 100644 --- a/pkg/skaffold/build/local/bazel.go +++ b/pkg/skaffold/build/local/bazel.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,21 +20,28 @@ import ( "context" "fmt" "io" + "net/http" "os" "os/exec" "path/filepath" "strings" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/pkg/errors" ) -func (b *Builder) buildBazel(ctx context.Context, out io.Writer, workspace string, a *latest.BazelArtifact) (string, error) { +func (b *Builder) buildBazel(ctx context.Context, out io.Writer, workspace string, a *latest.BazelArtifact, tag string) (string, error) { args := []string{"build"} args = append(args, a.BuildArgs...) args = append(args, a.BuildTarget) + // FIXME: is it possible to apply b.skipTests? cmd := exec.CommandContext(ctx, "bazel", args...) 
cmd.Dir = workspace cmd.Stdout = out @@ -43,30 +50,68 @@ func (b *Builder) buildBazel(ctx context.Context, out io.Writer, workspace strin return "", errors.Wrap(err, "running command") } - bazelBin, err := bazelBin(ctx, workspace) + bazelBin, err := bazelBin(ctx, workspace, a) if err != nil { return "", errors.Wrap(err, "getting path of bazel-bin") } - tarPath := buildTarPath(a.BuildTarget) - imageTar, err := os.Open(filepath.Join(bazelBin, tarPath)) + tarPath := filepath.Join(bazelBin, buildTarPath(a.BuildTarget)) + + if b.pushImages { + return pushImage(tarPath, tag) + } + + return b.loadImage(ctx, out, tarPath, a, tag) +} + +func pushImage(tarPath, tag string) (string, error) { + t, err := name.NewTag(tag, name.WeakValidation) + if err != nil { + return "", errors.Wrapf(err, "parsing tag %q", tag) + } + + auth, err := authn.DefaultKeychain.Resolve(t.Registry) + if err != nil { + return "", errors.Wrapf(err, "getting creds for %q", t) + } + + i, err := tarball.ImageFromPath(tarPath, nil) + if err != nil { + return "", errors.Wrapf(err, "reading image %q", tarPath) + } + + if err := remote.Write(t, i, auth, http.DefaultTransport); err != nil { + return "", errors.Wrapf(err, "writing image %q", t) + } + + return docker.RemoteDigest(tag) +} + +func (b *Builder) loadImage(ctx context.Context, out io.Writer, tarPath string, a *latest.BazelArtifact, tag string) (string, error) { + imageTar, err := os.Open(tarPath) if err != nil { return "", errors.Wrap(err, "opening image tarball") } defer imageTar.Close() - ref := buildImageTag(a.BuildTarget) - - imageID, err := b.localDocker.Load(ctx, out, imageTar, ref) + bazelTag := buildImageTag(a.BuildTarget) + imageID, err := b.localDocker.Load(ctx, out, imageTar, bazelTag) if err != nil { return "", errors.Wrap(err, "loading image into docker daemon") } + if err := b.localDocker.Tag(ctx, imageID, tag); err != nil { + return "", errors.Wrap(err, "tagging the image") + } + return imageID, nil } -func bazelBin(ctx context.Context, 
workspace string) (string, error) { - cmd := exec.CommandContext(ctx, "bazel", "info", "bazel-bin") +func bazelBin(ctx context.Context, workspace string, a *latest.BazelArtifact) (string, error) { + args := []string{"info", "bazel-bin"} + args = append(args, a.BuildArgs...) + + cmd := exec.CommandContext(ctx, "bazel", args...) cmd.Dir = workspace buf, err := util.RunCmdOut(cmd) diff --git a/pkg/skaffold/build/local/bazel_test.go b/pkg/skaffold/build/local/bazel_test.go index 7eedc1e5e52..1e06ba6cc60 100644 --- a/pkg/skaffold/build/local/bazel_test.go +++ b/pkg/skaffold/build/local/bazel_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "context" "testing" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/GoogleContainerTools/skaffold/testutil" ) @@ -27,11 +28,13 @@ import ( func TestBazelBin(t *testing.T) { defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) util.DefaultExecCommand = testutil.NewFakeCmd(t).WithRunOut( - "bazel info bazel-bin", + "bazel info bazel-bin --arg1 --arg2", "/absolute/path/bin\n", ) - bazelBin, err := bazelBin(context.Background(), ".") + bazelBin, err := bazelBin(context.Background(), ".", &latest.BazelArtifact{ + BuildArgs: []string{"--arg1", "--arg2"}, + }) testutil.CheckErrorAndDeepEqual(t, false, err, "/absolute/path/bin", bazelBin) } diff --git a/pkg/skaffold/build/local/docker.go b/pkg/skaffold/build/local/docker.go index e828e6ff366..629f1ba2105 100644 --- a/pkg/skaffold/build/local/docker.go +++ b/pkg/skaffold/build/local/docker.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use 
this file except in compliance with the License. @@ -25,34 +25,75 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" "github.com/pkg/errors" ) -func (b *Builder) buildDocker(ctx context.Context, out io.Writer, workspace string, a *latest.DockerArtifact) (string, error) { - initialTag := util.RandomID() +func (b *Builder) buildDocker(ctx context.Context, out io.Writer, workspace string, a *latest.DockerArtifact, tag string) (string, error) { + if err := b.pullCacheFromImages(ctx, out, a); err != nil { + return "", errors.Wrap(err, "pulling cache-from images") + } + + var ( + imageID string + err error + ) if b.cfg.UseDockerCLI || b.cfg.UseBuildkit { - dockerfilePath, err := docker.NormalizeDockerfilePath(workspace, a.DockerfilePath) - if err != nil { - return "", errors.Wrap(err, "normalizing dockerfile path") - } + imageID, err = b.dockerCLIBuild(ctx, out, workspace, a, tag) + } else { + imageID, err = b.localDocker.Build(ctx, out, workspace, a, tag) + } - args := []string{"build", workspace, "--file", dockerfilePath, "-t", initialTag} - args = append(args, docker.GetBuildArgs(a)...) + if b.pushImages { + return b.localDocker.Push(ctx, out, tag) + } - cmd := exec.CommandContext(ctx, "docker", args...) 
- if b.cfg.UseBuildkit { - cmd.Env = append(os.Environ(), "DOCKER_BUILDKIT=1") - } - cmd.Stdout = out - cmd.Stderr = out + return imageID, err +} + +func (b *Builder) dockerCLIBuild(ctx context.Context, out io.Writer, workspace string, a *latest.DockerArtifact, tag string) (string, error) { + dockerfilePath, err := docker.NormalizeDockerfilePath(workspace, a.DockerfilePath) + if err != nil { + return "", errors.Wrap(err, "normalizing dockerfile path") + } + + args := []string{"build", workspace, "--file", dockerfilePath, "-t", tag} + args = append(args, docker.GetBuildArgs(a)...) - if err := util.RunCmd(cmd); err != nil { - return "", errors.Wrap(err, "running build") + cmd := exec.CommandContext(ctx, "docker", args...) + if b.cfg.UseBuildkit { + cmd.Env = append(os.Environ(), "DOCKER_BUILDKIT=1") + } + cmd.Stdout = out + cmd.Stderr = out + + if err := util.RunCmd(cmd); err != nil { + return "", errors.Wrap(err, "running build") + } + + return b.localDocker.ImageID(ctx, tag) +} + +func (b *Builder) pullCacheFromImages(ctx context.Context, out io.Writer, a *latest.DockerArtifact) error { + if len(a.CacheFrom) == 0 { + return nil + } + + for _, image := range a.CacheFrom { + imageID, err := b.localDocker.ImageID(ctx, image) + if err != nil { + return errors.Wrapf(err, "getting imageID for %s", image) + } + if imageID != "" { + // already pulled + continue } - return b.localDocker.ImageID(ctx, initialTag) + if err := b.localDocker.Pull(ctx, out, image); err != nil { + warnings.Printf("Cache-From image couldn't be pulled: %s\n", image) + } } - return b.localDocker.Build(ctx, out, workspace, a, initialTag) + return nil } diff --git a/pkg/skaffold/build/local/jib.go b/pkg/skaffold/build/local/jib.go deleted file mode 100644 index f16d074cf6f..00000000000 --- a/pkg/skaffold/build/local/jib.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2018 The Skaffold Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in 
compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package local - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "regexp" - - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" -) - -// jibBuildImageRef generates a valid image name for the workspace and project. -// The image name is always prefixed with `jib`. -func generateJibImageRef(workspace string, project string) string { - imageName := "jib" + workspace - if project != "" { - imageName += "_" + project - } - // if the workspace + project is a valid image name then use it - if regexp.MustCompile(constants.RepositoryComponentRegex).MatchString(imageName) { - return imageName - } - // otherwise use a hash for a deterministic name - hasher := sha1.New() - io.WriteString(hasher, imageName) - return "jib__" + hex.EncodeToString(hasher.Sum(nil)) -} diff --git a/pkg/skaffold/build/local/jib_gradle.go b/pkg/skaffold/build/local/jib_gradle.go index e536ff3036c..026caa2897a 100644 --- a/pkg/skaffold/build/local/jib_gradle.go +++ b/pkg/skaffold/build/local/jib_gradle.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,8 +18,8 @@ package local import ( "context" - "fmt" "io" + "os" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/jib" @@ -29,38 +29,34 @@ import ( "github.com/sirupsen/logrus" ) -func (b *Builder) buildJibGradle(ctx context.Context, out io.Writer, workspace string, artifact *latest.Artifact) (string, error) { +func (b *Builder) buildJibGradle(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibGradleArtifact, tag string) (string, error) { if b.pushImages { - return b.buildJibGradleToRegistry(ctx, out, workspace, artifact) + return b.buildJibGradleToRegistry(ctx, out, workspace, artifact, tag) } - return b.buildJibGradleToDocker(ctx, out, workspace, artifact.JibGradleArtifact) + return b.buildJibGradleToDocker(ctx, out, workspace, artifact, tag) } -func (b *Builder) buildJibGradleToDocker(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibGradleArtifact) (string, error) { - skaffoldImage := generateJibImageRef(workspace, artifact.Project) - args := jib.GenerateGradleArgs("jibDockerBuild", skaffoldImage, artifact) - - if err := runGradleCommand(ctx, out, workspace, args); err != nil { +func (b *Builder) buildJibGradleToDocker(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibGradleArtifact, tag string) (string, error) { + args := jib.GenerateGradleArgs("jibDockerBuild", tag, artifact, b.skipTests) + if err := b.runGradleCommand(ctx, out, workspace, args); err != nil { return "", err } - return b.localDocker.ImageID(ctx, skaffoldImage) + return b.localDocker.ImageID(ctx, tag) } -func (b *Builder) buildJibGradleToRegistry(ctx context.Context, out io.Writer, workspace string, artifact *latest.Artifact) (string, error) { - initialTag := util.RandomID() - skaffoldImage := fmt.Sprintf("%s:%s", artifact.ImageName, initialTag) - args := jib.GenerateGradleArgs("jib", skaffoldImage, artifact.JibGradleArtifact) - - if err := 
runGradleCommand(ctx, out, workspace, args); err != nil { +func (b *Builder) buildJibGradleToRegistry(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibGradleArtifact, tag string) (string, error) { + args := jib.GenerateGradleArgs("jib", tag, artifact, b.skipTests) + if err := b.runGradleCommand(ctx, out, workspace, args); err != nil { return "", err } - return docker.RemoteDigest(skaffoldImage) + return docker.RemoteDigest(tag) } -func runGradleCommand(ctx context.Context, out io.Writer, workspace string, args []string) error { +func (b *Builder) runGradleCommand(ctx context.Context, out io.Writer, workspace string, args []string) error { cmd := jib.GradleCommand.CreateCommand(ctx, workspace, args) + cmd.Env = append(os.Environ(), b.localDocker.ExtraEnv()...) cmd.Stdout = out cmd.Stderr = out diff --git a/pkg/skaffold/build/local/jib_maven.go b/pkg/skaffold/build/local/jib_maven.go index 964c95af51c..ca36866a4c6 100644 --- a/pkg/skaffold/build/local/jib_maven.go +++ b/pkg/skaffold/build/local/jib_maven.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,8 +19,7 @@ package local import ( "context" "io" - - "fmt" + "os" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/jib" @@ -30,14 +29,14 @@ import ( "github.com/sirupsen/logrus" ) -func (b *Builder) buildJibMaven(ctx context.Context, out io.Writer, workspace string, artifact *latest.Artifact) (string, error) { +func (b *Builder) buildJibMaven(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibMavenArtifact, tag string) (string, error) { if b.pushImages { - return b.buildJibMavenToRegistry(ctx, out, workspace, artifact) + return b.buildJibMavenToRegistry(ctx, out, workspace, artifact, tag) } - return b.buildJibMavenToDocker(ctx, out, workspace, artifact.JibMavenArtifact) + return b.buildJibMavenToDocker(ctx, out, workspace, artifact, tag) } -func (b *Builder) buildJibMavenToDocker(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibMavenArtifact) (string, error) { +func (b *Builder) buildJibMavenToDocker(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibMavenArtifact, tag string) (string, error) { // If this is a multi-module project, we require `package` be bound to jib:dockerBuild if artifact.Module != "" { if err := verifyJibPackageGoal(ctx, "dockerBuild", workspace, artifact); err != nil { @@ -45,33 +44,28 @@ func (b *Builder) buildJibMavenToDocker(ctx context.Context, out io.Writer, work } } - skaffoldImage := generateJibImageRef(workspace, artifact.Module) - args := jib.GenerateMavenArgs("dockerBuild", skaffoldImage, artifact) - - if err := runMavenCommand(ctx, out, workspace, args); err != nil { + args := jib.GenerateMavenArgs("dockerBuild", tag, artifact, b.skipTests) + if err := b.runMavenCommand(ctx, out, workspace, args); err != nil { return "", err } - return b.localDocker.ImageID(ctx, skaffoldImage) + return b.localDocker.ImageID(ctx, tag) } -func (b *Builder) buildJibMavenToRegistry(ctx context.Context, out 
io.Writer, workspace string, artifact *latest.Artifact) (string, error) { +func (b *Builder) buildJibMavenToRegistry(ctx context.Context, out io.Writer, workspace string, artifact *latest.JibMavenArtifact, tag string) (string, error) { // If this is a multi-module project, we require `package` be bound to jib:build - if artifact.JibMavenArtifact.Module != "" { - if err := verifyJibPackageGoal(ctx, "build", workspace, artifact.JibMavenArtifact); err != nil { + if artifact.Module != "" { + if err := verifyJibPackageGoal(ctx, "build", workspace, artifact); err != nil { return "", err } } - initialTag := util.RandomID() - skaffoldImage := fmt.Sprintf("%s:%s", artifact.ImageName, initialTag) - args := jib.GenerateMavenArgs("build", skaffoldImage, artifact.JibMavenArtifact) - - if err := runMavenCommand(ctx, out, workspace, args); err != nil { + args := jib.GenerateMavenArgs("build", tag, artifact, b.skipTests) + if err := b.runMavenCommand(ctx, out, workspace, args); err != nil { return "", err } - return docker.RemoteDigest(skaffoldImage) + return docker.RemoteDigest(tag) } // verifyJibPackageGoal verifies that the referenced module has `package` bound to a single jib goal. @@ -100,8 +94,9 @@ func verifyJibPackageGoal(ctx context.Context, requiredGoal string, workspace st return nil } -func runMavenCommand(ctx context.Context, out io.Writer, workspace string, args []string) error { +func (b *Builder) runMavenCommand(ctx context.Context, out io.Writer, workspace string, args []string) error { cmd := jib.MavenCommand.CreateCommand(ctx, workspace, args) + cmd.Env = append(os.Environ(), b.localDocker.ExtraEnv()...) 
cmd.Stdout = out cmd.Stderr = out diff --git a/pkg/skaffold/build/local/jib_test.go b/pkg/skaffold/build/local/jib_maven_test.go similarity index 76% rename from pkg/skaffold/build/local/jib_test.go rename to pkg/skaffold/build/local/jib_maven_test.go index 94fab894b89..a887c3e29d3 100644 --- a/pkg/skaffold/build/local/jib_test.go +++ b/pkg/skaffold/build/local/jib_maven_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -52,22 +52,3 @@ func TestMavenVerifyJibPackageGoal(t *testing.T) { } } } - -func TestGenerateJibImageRef(t *testing.T) { - var testCases = []struct { - workspace string - project string - out string - }{ - {"simple", "", "jibsimple"}, - {"simple", "project", "jibsimple_project"}, - {".", "project", "jib__d8c7cbe8892fe8442b7f6ef42026769ee6a01e67"}, - {"complex/workspace", "project", "jib__965ec099f720d3ccc9c038c21ea4a598c9632883"}, - } - - for _, tt := range testCases { - computed := generateJibImageRef(tt.workspace, tt.project) - - testutil.CheckDeepEqual(t, tt.out, computed) - } -} diff --git a/pkg/skaffold/build/local/local.go b/pkg/skaffold/build/local/local.go index cbe075f407d..3473bfca164 100644 --- a/pkg/skaffold/build/local/local.go +++ b/pkg/skaffold/build/local/local.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,93 +20,108 @@ import ( "context" "fmt" "io" + "strings" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/bazel" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/jib" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Build runs a docker build on the host and tags the resulting image with // its checksum. It streams build progress to the writer argument. -func (b *Builder) Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]build.Artifact, error) { +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { if b.localCluster { color.Default.Fprintf(out, "Found [%s] context, using local docker daemon.\n", b.kubeContext) } defer b.localDocker.Close() // TODO(dgageot): parallel builds - return build.InSequence(ctx, out, tagger, artifacts, b.buildArtifact) + return build.InSequence(ctx, out, tags, artifacts, b.buildArtifact) } -func (b *Builder) buildArtifact(ctx context.Context, out io.Writer, tagger tag.Tagger, artifact *latest.Artifact) (string, error) { - digest, err := b.runBuildForArtifact(ctx, out, artifact) +func (b *Builder) buildArtifact(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + digestOrImageID, err := b.runBuildForArtifact(ctx, out, artifact, tag) if err != nil { return "", errors.Wrap(err, "build artifact") } - if b.alreadyTagged == nil { - b.alreadyTagged = make(map[string]string) - } - if tag, present := b.alreadyTagged[digest]; present { - return tag, nil - } - - tag, err := 
tagger.GenerateFullyQualifiedImageName(artifact.Workspace, tag.Options{ - ImageName: artifact.ImageName, - Digest: digest, - }) - if err != nil { - return "", errors.Wrap(err, "generating tag") + if b.pushImages { + digest := digestOrImageID + return tag + "@" + digest, nil } - if err := b.retagAndPush(ctx, out, digest, tag, artifact); err != nil { - return "", errors.Wrap(err, "tagging") + // k8s doesn't recognize the imageID or any combination of the image name + // suffixed with the imageID, as a valid image name. + // So, the solution we chose is to create a tag, just for Skaffold, from + // the imageID, and use that in the manifests. + imageID := digestOrImageID + uniqueTag := artifact.ImageName + ":" + strings.TrimPrefix(imageID, "sha256:") + if err := b.localDocker.Tag(ctx, imageID, uniqueTag); err != nil { + return "", err } - b.alreadyTagged[digest] = tag - - return tag, nil + return uniqueTag, nil } -func (b *Builder) runBuildForArtifact(ctx context.Context, out io.Writer, artifact *latest.Artifact) (string, error) { +func (b *Builder) runBuildForArtifact(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { switch { case artifact.DockerArtifact != nil: - return b.buildDocker(ctx, out, artifact.Workspace, artifact.DockerArtifact) + return b.buildDocker(ctx, out, artifact.Workspace, artifact.DockerArtifact, tag) case artifact.BazelArtifact != nil: - return b.buildBazel(ctx, out, artifact.Workspace, artifact.BazelArtifact) + return b.buildBazel(ctx, out, artifact.Workspace, artifact.BazelArtifact, tag) case artifact.JibMavenArtifact != nil: - return b.buildJibMaven(ctx, out, artifact.Workspace, artifact) + return b.buildJibMaven(ctx, out, artifact.Workspace, artifact.JibMavenArtifact, tag) case artifact.JibGradleArtifact != nil: - return b.buildJibGradle(ctx, out, artifact.Workspace, artifact) + return b.buildJibGradle(ctx, out, artifact.Workspace, artifact.JibGradleArtifact, tag) default: return "", 
fmt.Errorf("undefined artifact type: %+v", artifact.ArtifactType) } } -func (b *Builder) retagAndPush(ctx context.Context, out io.Writer, initialTag string, newTag string, artifact *latest.Artifact) error { - if b.pushImages && (artifact.JibMavenArtifact != nil || artifact.JibGradleArtifact != nil) { - if err := docker.AddTag(initialTag, newTag); err != nil { - return errors.Wrap(err, "tagging image") - } - return nil - } +func (b *Builder) DependenciesForArtifact(ctx context.Context, a *latest.Artifact) ([]string, error) { + var ( + paths []string + err error + ) - if err := b.localDocker.Tag(ctx, initialTag, newTag); err != nil { - return err + switch { + case a.DockerArtifact != nil: + paths, err = docker.GetDependencies(ctx, a.Workspace, a.DockerArtifact) + + case a.BazelArtifact != nil: + paths, err = bazel.GetDependencies(ctx, a.Workspace, a.BazelArtifact) + + case a.JibMavenArtifact != nil: + paths, err = jib.GetDependenciesMaven(ctx, a.Workspace, a.JibMavenArtifact) + + case a.JibGradleArtifact != nil: + paths, err = jib.GetDependenciesGradle(ctx, a.Workspace, a.JibGradleArtifact) + + default: + return nil, fmt.Errorf("undefined artifact type: %+v", a.ArtifactType) } - if b.pushImages { - if _, err := b.localDocker.Push(ctx, out, newTag); err != nil { - return errors.Wrap(err, "pushing") + if err != nil { + // if the context was cancelled act as if all is well + // TODO(dgageot): this should be even higher in the call chain. 
+ if ctx.Err() == context.Canceled { + logrus.Debugln(errors.Wrap(err, "ignore error since context is cancelled")) + return nil, nil } + + return nil, err } - return nil + return util.AbsolutePaths(a.Workspace, paths), nil } diff --git a/pkg/skaffold/build/local/local_test.go b/pkg/skaffold/build/local/local_test.go index 63594cf5acf..e47d72577d4 100644 --- a/pkg/skaffold/build/local/local_test.go +++ b/pkg/skaffold/build/local/local_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ package local import ( "context" - "fmt" "io/ioutil" "testing" @@ -26,23 +25,11 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" "github.com/GoogleContainerTools/skaffold/testutil" "github.com/docker/docker/api/types" ) -type FakeTagger struct { - Out string - Err error -} - -func (f *FakeTagger) GenerateFullyQualifiedImageName(workingDir string, tagOpts tag.Options) (string, error) { - return f.Out, f.Err -} - -func (f *FakeTagger) Labels() map[string]string { - return map[string]string{} -} - type testAuthHelper struct{} func (t testAuthHelper) GetAuthConfig(string) (types.AuthConfig, error) { @@ -55,55 +42,45 @@ func TestLocalRun(t *testing.T) { docker.DefaultAuthHelper = testAuthHelper{} var tests = []struct { - description string - api testutil.FakeAPIClient - tagger tag.Tagger - artifacts []*latest.Artifact - expected []build.Artifact - localCluster bool - shouldErr bool + description string + api testutil.FakeAPIClient + tags tag.ImageTags + artifacts []*latest.Artifact + expected []build.Artifact + expectedWarnings []string + pushImages bool + shouldErr bool 
}{ { - description: "single build", - artifacts: []*latest.Artifact{{ - ImageName: "gcr.io/test/image", - ArtifactType: latest.ArtifactType{ - DockerArtifact: &latest.DockerArtifact{}, - }}, - }, - tagger: &FakeTagger{Out: "gcr.io/test/image:tag"}, - expected: []build.Artifact{{ - ImageName: "gcr.io/test/image", - Tag: "gcr.io/test/image:tag", - }}, - }, - { - description: "single build local cluster", + description: "single build (local)", artifacts: []*latest.Artifact{{ ImageName: "gcr.io/test/image", ArtifactType: latest.ArtifactType{ DockerArtifact: &latest.DockerArtifact{}, }}, }, - tagger: &FakeTagger{Out: "gcr.io/test/image:tag"}, - localCluster: true, + tags: tag.ImageTags(map[string]string{"gcr.io/test/image": "gcr.io/test/image:tag"}), + api: testutil.FakeAPIClient{}, + pushImages: false, expected: []build.Artifact{{ ImageName: "gcr.io/test/image", - Tag: "gcr.io/test/image:tag", + Tag: "gcr.io/test/image:1", }}, }, { - description: "subset build", - tagger: &FakeTagger{Out: "gcr.io/test/image:tag"}, + description: "single build (remote)", artifacts: []*latest.Artifact{{ ImageName: "gcr.io/test/image", ArtifactType: latest.ArtifactType{ DockerArtifact: &latest.DockerArtifact{}, }}, }, + tags: tag.ImageTags(map[string]string{"gcr.io/test/image": "gcr.io/test/image:tag"}), + api: testutil.FakeAPIClient{}, + pushImages: true, expected: []build.Artifact{{ ImageName: "gcr.io/test/image", - Tag: "gcr.io/test/image:tag", + Tag: "gcr.io/test/image:tag@sha256:7368613235363a31e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }}, }, { @@ -114,14 +91,6 @@ func TestLocalRun(t *testing.T) { }, shouldErr: true, }, - { - description: "error image tag", - artifacts: []*latest.Artifact{{}}, - api: testutil.FakeAPIClient{ - ErrImageTag: true, - }, - shouldErr: true, - }, { description: "unkown artifact type", artifacts: []*latest.Artifact{{}}, @@ -136,23 +105,97 @@ func TestLocalRun(t *testing.T) { shouldErr: true, }, { - description: "error tagger", - 
artifacts: []*latest.Artifact{{}}, - tagger: &FakeTagger{Err: fmt.Errorf("")}, - shouldErr: true, + description: "cache-from images already pulled", + artifacts: []*latest.Artifact{{ + ImageName: "gcr.io/test/image", + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + CacheFrom: []string{"pull1", "pull2"}, + }, + }}, + }, + api: testutil.FakeAPIClient{ + TagToImageID: map[string]string{ + "pull1": "imageID1", + "pull2": "imageID2", + }, + }, + tags: tag.ImageTags(map[string]string{"gcr.io/test/image": "gcr.io/test/image:tag"}), + expected: []build.Artifact{{ + ImageName: "gcr.io/test/image", + Tag: "gcr.io/test/image:1", + }}, + }, + { + description: "pull cache-from images", + artifacts: []*latest.Artifact{{ + ImageName: "gcr.io/test/image", + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + CacheFrom: []string{"pull1", "pull2"}, + }, + }}, + }, + api: testutil.FakeAPIClient{}, + tags: tag.ImageTags(map[string]string{"gcr.io/test/image": "gcr.io/test/image:tag"}), + expected: []build.Artifact{{ + ImageName: "gcr.io/test/image", + Tag: "gcr.io/test/image:1", + }}, + }, + { + description: "ignore cache-from pull error", + artifacts: []*latest.Artifact{{ + ImageName: "gcr.io/test/image", + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + CacheFrom: []string{"pull1"}, + }, + }}, + }, + api: testutil.FakeAPIClient{ + ErrImagePull: true, + }, + tags: tag.ImageTags(map[string]string{"gcr.io/test/image": "gcr.io/test/image:tag"}), + expected: []build.Artifact{{ + ImageName: "gcr.io/test/image", + Tag: "gcr.io/test/image:1", + }}, + expectedWarnings: []string{"Cache-From image couldn't be pulled: pull1\n"}, + }, + { + description: "inspect error", + artifacts: []*latest.Artifact{{ + ImageName: "gcr.io/test/image", + ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + CacheFrom: []string{"pull1"}, + }, + }}, + }, + api: testutil.FakeAPIClient{ + 
ErrImageInspect: true, + }, + shouldErr: true, }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { + defer func(w warnings.Warner) { warnings.Printf = w }(warnings.Printf) + fakeWarner := &warnings.Collect{} + warnings.Printf = fakeWarner.Warnf + l := Builder{ - cfg: &latest.LocalBuild{}, - localDocker: docker.NewLocalDaemon(&test.api), - localCluster: test.localCluster, + cfg: &latest.LocalBuild{}, + localDocker: docker.NewLocalDaemon(&test.api, nil), + pushImages: test.pushImages, } - res, err := l.Build(context.Background(), ioutil.Discard, test.tagger, test.artifacts) + res, err := l.Build(context.Background(), ioutil.Discard, test.tags, test.artifacts) + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, res) + testutil.CheckDeepEqual(t, test.expectedWarnings, fakeWarner.Warnings) }) } } diff --git a/pkg/skaffold/build/local/types.go b/pkg/skaffold/build/local/types.go index 0c6ed1f032a..81ca43dd691 100644 --- a/pkg/skaffold/build/local/types.go +++ b/pkg/skaffold/build/local/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "context" "fmt" + configutil "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" @@ -34,19 +35,21 @@ type Builder struct { localDocker docker.LocalDaemon localCluster bool pushImages bool + skipTests bool kubeContext string - - alreadyTagged map[string]string } // NewBuilder returns an new instance of a local Builder. 
-func NewBuilder(cfg *latest.LocalBuild, kubeContext string) (*Builder, error) { +func NewBuilder(cfg *latest.LocalBuild, kubeContext string, skipTests bool) (*Builder, error) { localDocker, err := docker.NewAPIClient() if err != nil { return nil, errors.Wrap(err, "getting docker client") } - localCluster := isLocal(kubeContext) + localCluster, err := configutil.GetLocalCluster() + if err != nil { + return nil, errors.Wrap(err, "getting localCluster") + } var pushImages bool if cfg.Push == nil { @@ -62,15 +65,10 @@ func NewBuilder(cfg *latest.LocalBuild, kubeContext string) (*Builder, error) { localDocker: localDocker, localCluster: localCluster, pushImages: pushImages, + skipTests: skipTests, }, nil } -func isLocal(kubeContext string) bool { - return kubeContext == constants.DefaultMinikubeContext || - kubeContext == constants.DefaultDockerForDesktopContext || - kubeContext == constants.DefaultDockerDesktopContext -} - // Labels are labels specific to local builder. func (b *Builder) Labels() map[string]string { labels := map[string]string{ diff --git a/pkg/skaffold/build/parallel.go b/pkg/skaffold/build/parallel.go index bf150d47450..6f145b8a56a 100644 --- a/pkg/skaffold/build/parallel.go +++ b/pkg/skaffold/build/parallel.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,42 +30,56 @@ import ( const bufferedLinesPerArtifact = 10000 -type artifactBuilder func(ctx context.Context, out io.Writer, tagger tag.Tagger, artifact *latest.Artifact) (string, error) +type artifactBuilder func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) // InParallel builds a list of artifacts in parallel but prints the logs in sequential order. 
-func InParallel(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact, buildArtifact artifactBuilder) ([]Artifact, error) { +func InParallel(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact, buildArtifact artifactBuilder) ([]Artifact, error) { if len(artifacts) == 1 { - return InSequence(ctx, out, tagger, artifacts, buildArtifact) + return InSequence(ctx, out, tags, artifacts, buildArtifact) } ctx, cancel := context.WithCancel(ctx) defer cancel() n := len(artifacts) - tags := make([]string, n) + finalTags := make([]string, n) errs := make([]error, n) - outputs := make([]chan (string), n) + outputs := make([]chan []byte, n) // Run builds in // for index := range artifacts { i := index - lines := make(chan (string), bufferedLinesPerArtifact) + lines := make(chan []byte, bufferedLinesPerArtifact) outputs[i] = lines r, w := io.Pipe() + // Log to the pipe, output will be collected and printed later go func() { - // Log to the pipe, output will be collected and printed later - fmt.Fprintf(w, "Building [%s]...\n", artifacts[i].ImageName) + // Make sure logs are printed in colors + var cw io.WriteCloser + if color.IsTerminal(out) { + cw = color.ColoredWriteCloser{WriteCloser: w} + } else { + cw = w + } + + color.Default.Fprintf(cw, "Building [%s]...\n", artifacts[i].ImageName) + + tag, present := tags[artifacts[i].ImageName] + if !present { + errs[i] = fmt.Errorf("unable to find tag for image %s", artifacts[i].ImageName) + } else { + finalTags[i], errs[i] = buildArtifact(ctx, cw, artifacts[i], tag) + } - tags[i], errs[i] = buildArtifact(ctx, w, tagger, artifacts[i]) - w.Close() + cw.Close() }() go func() { scanner := bufio.NewScanner(r) for scanner.Scan() { - lines <- scanner.Text() + lines <- scanner.Bytes() } close(lines) }() @@ -76,7 +90,8 @@ func InParallel(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts for i, artifact := range artifacts { for line := range outputs[i] { - 
color.Default.Fprintln(out, line) + out.Write(line) + fmt.Fprintln(out) } if errs[i] != nil { @@ -85,7 +100,7 @@ func InParallel(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts built = append(built, Artifact{ ImageName: artifact.ImageName, - Tag: tags[i], + Tag: finalTags[i], }) } diff --git a/pkg/skaffold/build/parallel_test.go b/pkg/skaffold/build/parallel_test.go new file mode 100644 index 00000000000..75dbb50fdaa --- /dev/null +++ b/pkg/skaffold/build/parallel_test.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package build + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestInParallel(t *testing.T) { + var tests = []struct { + description string + buildArtifact artifactBuilder + tags tag.ImageTags + expectedArtifacts []Artifact + expectedOut string + shouldErr bool + }{ + { + description: "build succeeds", + buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + return fmt.Sprintf("%s@sha256:abac", tag), nil + }, + tags: tag.ImageTags{ + "skaffold/image1": "skaffold/image1:v0.0.1", + "skaffold/image2": "skaffold/image2:v0.0.2", + }, + expectedArtifacts: []Artifact{ + {ImageName: "skaffold/image1", Tag: "skaffold/image1:v0.0.1@sha256:abac"}, + {ImageName: "skaffold/image2", Tag: "skaffold/image2:v0.0.2@sha256:abac"}, + }, + expectedOut: "Building [skaffold/image1]...\nBuilding [skaffold/image2]...\n", + }, + { + description: "build fails", + buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + return "", fmt.Errorf("build fails") + }, + tags: tag.ImageTags{ + "skaffold/image1": "", + }, + expectedOut: "Building [skaffold/image1]...\n", + shouldErr: true, + }, + { + description: "tag not found", + tags: tag.ImageTags{}, + expectedOut: "Building [skaffold/image1]...\n", + shouldErr: true, + }, + } + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + out := new(bytes.Buffer) + artifacts := []*latest.Artifact{ + {ImageName: "skaffold/image1"}, + {ImageName: "skaffold/image2"}, + } + + got, err := InParallel(context.Background(), out, test.tags, artifacts, test.buildArtifact) + + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expectedArtifacts, got) + testutil.CheckDeepEqual(t, 
test.expectedOut, out.String()) + }) + } +} diff --git a/pkg/skaffold/build/plugin/core.go b/pkg/skaffold/build/plugin/core.go new file mode 100644 index 00000000000..b62d3384b9f --- /dev/null +++ b/pkg/skaffold/build/plugin/core.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "os" + "os/signal" + "syscall" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/builders/bazel" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/builders/docker" + hashiplugin "github.com/hashicorp/go-plugin" + "github.com/pkg/errors" +) + +// SkaffoldCorePluginExecutionMap maps the core plugin name to the execution function +var SkaffoldCorePluginExecutionMap = map[string]func() error{ + "docker": docker.Execute, + "bazel": bazel.Execute, +} + +// ShouldExecuteCorePlugin returns true if env variables for plugins are set properly +// and the plugin passed in is a core plugin +func ShouldExecuteCorePlugin() bool { + if os.Getenv(constants.SkaffoldPluginKey) != constants.SkaffoldPluginValue { + return false + } + plugin := os.Getenv(constants.SkaffoldPluginName) + _, ok := SkaffoldCorePluginExecutionMap[plugin] + return ok +} + +var cancelError error + +// Execute executes a plugin, assumes ShouldExecuteCorePlugin has already been called +func Execute() error { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, 
syscall.SIGINT, syscall.SIGTERM) + plugin := os.Getenv(constants.SkaffoldPluginName) + + errCh := make(chan error, 1) + + go func() { + errCh <- SkaffoldCorePluginExecutionMap[plugin]() + }() + + go func() { + <-sigs + errCh <- cancelError + }() + + err := <-errCh + + if err == cancelError { + hashiplugin.CleanupClients() + } + if err != nil { + hashiplugin.CleanupClients() + return errors.Wrap(err, "executing plugin") + } + + return nil +} diff --git a/pkg/skaffold/build/plugin/plugin.go b/pkg/skaffold/build/plugin/plugin.go new file mode 100644 index 00000000000..cec2c2a13ee --- /dev/null +++ b/pkg/skaffold/build/plugin/plugin.go @@ -0,0 +1,161 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/shared" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + plugin "github.com/hashicorp/go-plugin" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // For testing + randomID = util.RandomFourCharacterID +) + +// NewPluginBuilder initializes and returns all required plugin builders +func NewPluginBuilder(cfg *latest.BuildConfig, opts *config.SkaffoldOptions) (shared.PluginBuilder, error) { + // We're a host. Start by launching the plugin process. + logrus.SetOutput(os.Stdout) + + builders := map[string]shared.PluginBuilder{} + + for _, a := range cfg.Artifacts { + p := a.BuilderPlugin.Name + if _, ok := builders[p]; ok { + continue + } + cmd := exec.Command(p) + if _, ok := SkaffoldCorePluginExecutionMap[p]; ok { + executable, err := os.Executable() + if err != nil { + return nil, errors.Wrap(err, "getting executable path") + } + cmd = exec.Command(executable) + cmd.Env = append(os.Environ(), []string{fmt.Sprintf("%s=%s", constants.SkaffoldPluginKey, constants.SkaffoldPluginValue), + fmt.Sprintf("%s=%s", constants.SkaffoldPluginName, p)}...) 
+ } + + client := plugin.NewClient(&plugin.ClientConfig{ + Stderr: os.Stderr, + SyncStderr: os.Stderr, + SyncStdout: os.Stdout, + Managed: true, + HandshakeConfig: shared.Handshake, + Plugins: shared.PluginMap, + Cmd: cmd, + }) + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, errors.Wrap(err, "connecting via rpc") + } + + // Request the plugin + raw, err := rpcClient.Dispense(p) + if err != nil { + return nil, errors.Wrap(err, "requesting rpc plugin") + } + pluginBuilder := raw.(shared.PluginBuilder) + builders[p] = pluginBuilder + } + + b := &Builder{ + Builders: builders, + } + b.Init(opts, cfg.ExecutionEnvironment) + return b, nil +} + +type Builder struct { + Builders map[string]shared.PluginBuilder +} + +func (b *Builder) Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) { + for _, builder := range b.Builders { + builder.Init(opts, env) + } +} + +// Labels are labels applied to deployed resources. +func (b *Builder) Labels() map[string]string { + labels := map[string]string{} + for _, builder := range b.Builders { + for k, v := range builder.Labels() { + if val, ok := labels[k]; ok { + random := fmt.Sprintf("%s-%s", k, randomID()) + logrus.Warnf("%s=%s label exists, saving %s=%s as %s=%s to avoid overlap", k, val, k, v, random, v) + labels[random] = v + continue + } + labels[k] = v + } + } + return labels +} + +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + var builtArtifacts []build.Artifact + // Group artifacts by plugin name + m := retrieveArtifactsByPlugin(artifacts) + // Group artifacts by builder + for name, builder := range b.Builders { + bArts, err := builder.Build(ctx, out, tags, m[name]) + if err != nil { + return nil, errors.Wrapf(err, "building artifacts with builder %s", name) + } + builtArtifacts = append(builtArtifacts, bArts...) 
+ } + return builtArtifacts, nil +} + +func (b *Builder) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + // Group artifacts by builder + for name, builder := range b.Builders { + if name != artifact.BuilderPlugin.Name { + continue + } + return builder.DependenciesForArtifact(ctx, artifact) + } + return nil, errors.New("couldn't find plugin builder to get dependencies for artifact") +} + +func retrieveArtifactsByPlugin(artifacts []*latest.Artifact) map[string][]*latest.Artifact { + m := map[string][]*latest.Artifact{} + for _, a := range artifacts { + if _, ok := m[a.BuilderPlugin.Name]; ok { + m[a.BuilderPlugin.Name] = append(m[a.BuilderPlugin.Name], a) + continue + } + m[a.BuilderPlugin.Name] = []*latest.Artifact{a} + } + return m +} diff --git a/pkg/skaffold/build/plugin/plugin_test.go b/pkg/skaffold/build/plugin/plugin_test.go new file mode 100644 index 00000000000..5abfb8e3d1a --- /dev/null +++ b/pkg/skaffold/build/plugin/plugin_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin + +import ( + "context" + "io" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/shared" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +type mockBuilder struct { + labels map[string]string + artifacts []build.Artifact +} + +func (b *mockBuilder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + return b.artifacts, nil +} + +func (b *mockBuilder) Labels() map[string]string { + return b.labels +} + +func (b *mockBuilder) Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) {} + +func (b *mockBuilder) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + return nil, nil +} + +func TestPluginBuilderLabels(t *testing.T) { + tests := []struct { + name string + builder shared.PluginBuilder + expected map[string]string + }{ + { + name: "check labels", + builder: &Builder{ + Builders: map[string]shared.PluginBuilder{ + "mock-one": &mockBuilder{ + labels: map[string]string{"key-one": "value-one"}, + }, + "mock-two": &mockBuilder{ + labels: map[string]string{"key-two": "value-two"}, + }, + }, + }, + expected: map[string]string{ + "key-one": "value-one", + "key-two": "value-two", + }, + }, + { + name: "check overlapping labels", + builder: &Builder{ + Builders: map[string]shared.PluginBuilder{ + "mock-one": &mockBuilder{ + labels: map[string]string{"key-one": "value"}, + }, + "mock-two": &mockBuilder{ + labels: map[string]string{"key-one": "value"}, + }, + }, + }, + expected: map[string]string{ + "key-one": "value", + "key-one-rand": "value", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { 
+ original := randomID + mockRandomID := func() string { + return "rand" + } + randomID = mockRandomID + defer func() { + randomID = original + }() + + actual := test.builder.Labels() + testutil.CheckErrorAndDeepEqual(t, false, nil, test.expected, actual) + }) + } +} diff --git a/pkg/skaffold/build/prebuilt.go b/pkg/skaffold/build/prebuilt.go index 884988d9def..a32f7db8530 100644 --- a/pkg/skaffold/build/prebuilt.go +++ b/pkg/skaffold/build/prebuilt.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -46,7 +46,7 @@ func (b *prebuiltImagesBuilder) Labels() map[string]string { } } -func (b *prebuiltImagesBuilder) Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]Artifact, error) { +func (b *prebuiltImagesBuilder) Build(ctx context.Context, out io.Writer, _ tag.ImageTags, artifacts []*latest.Artifact) ([]Artifact, error) { tags := make(map[string]string) for _, tag := range b.images { @@ -82,3 +82,8 @@ func (b *prebuiltImagesBuilder) Build(ctx context.Context, out io.Writer, tagger return builds, nil } + +// DependenciesForArtifact returns nil since a prebuilt image should have no dependencies +func (b *prebuiltImagesBuilder) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + return nil, nil +} diff --git a/pkg/skaffold/build/prebuilt_test.go b/pkg/skaffold/build/prebuilt_test.go index ce44a8e8c53..df501f2ab6f 100644 --- a/pkg/skaffold/build/prebuilt_test.go +++ b/pkg/skaffold/build/prebuilt_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/build/sequence.go b/pkg/skaffold/build/sequence.go index 4eddb688e65..a242a40d8d0 100644 --- a/pkg/skaffold/build/sequence.go +++ b/pkg/skaffold/build/sequence.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ package build import ( "context" + "fmt" "io" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" @@ -27,20 +28,25 @@ import ( ) // InSequence builds a list of artifacts in sequence. -func InSequence(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact, buildArtifact artifactBuilder) ([]Artifact, error) { +func InSequence(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact, buildArtifact artifactBuilder) ([]Artifact, error) { var builds []Artifact for _, artifact := range artifacts { color.Default.Fprintf(out, "Building [%s]...\n", artifact.ImageName) - tag, err := buildArtifact(ctx, out, tagger, artifact) + tag, present := tags[artifact.ImageName] + if !present { + return nil, fmt.Errorf("unable to find tag for image %s", artifact.ImageName) + } + + finalTag, err := buildArtifact(ctx, out, artifact, tag) if err != nil { return nil, errors.Wrapf(err, "building [%s]", artifact.ImageName) } builds = append(builds, Artifact{ ImageName: artifact.ImageName, - Tag: tag, + Tag: finalTag, }) } diff --git a/pkg/skaffold/build/sequence_test.go b/pkg/skaffold/build/sequence_test.go new file mode 100644 index 00000000000..326877333b9 --- /dev/null +++ b/pkg/skaffold/build/sequence_test.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package build + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestInSequence(t *testing.T) { + var tests = []struct { + description string + buildArtifact artifactBuilder + tags tag.ImageTags + expectedArtifacts []Artifact + expectedOut string + shouldErr bool + }{ + { + description: "build succeeds", + buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + return fmt.Sprintf("%s@sha256:abac", tag), nil + }, + tags: tag.ImageTags{ + "skaffold/image1": "skaffold/image1:v0.0.1", + "skaffold/image2": "skaffold/image2:v0.0.2", + }, + expectedArtifacts: []Artifact{ + {ImageName: "skaffold/image1", Tag: "skaffold/image1:v0.0.1@sha256:abac"}, + {ImageName: "skaffold/image2", Tag: "skaffold/image2:v0.0.2@sha256:abac"}, + }, + expectedOut: "Building [skaffold/image1]...\nBuilding [skaffold/image2]...\n", + }, + { + description: "build fails", + buildArtifact: func(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { + return "", fmt.Errorf("build fails") + }, + tags: tag.ImageTags{ + "skaffold/image1": "", + }, + expectedOut: "Building [skaffold/image1]...\n", + shouldErr: true, + }, + { + description: "tag not found", + tags: tag.ImageTags{}, + expectedOut: "Building [skaffold/image1]...\n", + shouldErr: true, + }, + } + for _, test := range tests { + 
t.Run(test.description, func(t *testing.T) { + out := new(bytes.Buffer) + artifacts := []*latest.Artifact{ + {ImageName: "skaffold/image1"}, + {ImageName: "skaffold/image2"}, + } + + got, err := InSequence(context.Background(), out, test.tags, artifacts, test.buildArtifact) + + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expectedArtifacts, got) + testutil.CheckDeepEqual(t, test.expectedOut, out.String()) + }) + } +} diff --git a/pkg/skaffold/build/tag/custom.go b/pkg/skaffold/build/tag/custom.go index 44b2aa41957..dcf8e515859 100644 --- a/pkg/skaffold/build/tag/custom.go +++ b/pkg/skaffold/build/tag/custom.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,11 +34,11 @@ func (c *CustomTag) Labels() map[string]string { } // GenerateFullyQualifiedImageName tags an image with the custom tag -func (c *CustomTag) GenerateFullyQualifiedImageName(workingDir string, opts Options) (string, error) { +func (c *CustomTag) GenerateFullyQualifiedImageName(workingDir, imageName string) (string, error) { tag := c.Tag if tag == "" { return "", errors.New("custom tag not provided") } - return fmt.Sprintf("%s:%s", opts.ImageName, tag), nil + return fmt.Sprintf("%s:%s", imageName, tag), nil } diff --git a/pkg/skaffold/build/tag/custom_test.go b/pkg/skaffold/build/tag/custom_test.go index 34c793f152d..dc04bc00879 100644 --- a/pkg/skaffold/build/tag/custom_test.go +++ b/pkg/skaffold/build/tag/custom_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -27,9 +27,7 @@ func TestCustomTag_GenerateFullyQualifiedImageName(t *testing.T) { Tag: "1.2.3-beta", } - tag, err := c.GenerateFullyQualifiedImageName(".", Options{ - ImageName: "test", - }) + tag, err := c.GenerateFullyQualifiedImageName(".", "test") testutil.CheckErrorAndDeepEqual(t, false, err, "test:1.2.3-beta", tag) } diff --git a/pkg/skaffold/build/tag/date_time.go b/pkg/skaffold/build/tag/date_time.go index b60f1d2f63a..3498b9af898 100644 --- a/pkg/skaffold/build/tag/date_time.go +++ b/pkg/skaffold/build/tag/date_time.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,7 +49,7 @@ func (tagger *dateTimeTagger) Labels() map[string]string { } // GenerateFullyQualifiedImageName tags an image with the supplied image name and the current timestamp -func (tagger *dateTimeTagger) GenerateFullyQualifiedImageName(workingDir string, opts Options) (string, error) { +func (tagger *dateTimeTagger) GenerateFullyQualifiedImageName(workingDir, imageName string) (string, error) { format := tagTime if len(tagger.Format) > 0 { format = tagger.Format @@ -65,5 +65,5 @@ func (tagger *dateTimeTagger) GenerateFullyQualifiedImageName(workingDir string, return "", fmt.Errorf("bad timezone provided: \"%s\", error: %s", timezone, err) } - return fmt.Sprintf("%s:%s", opts.ImageName, tagger.timeFn().In(loc).Format(format)), nil + return fmt.Sprintf("%s:%s", imageName, tagger.timeFn().In(loc).Format(format)), nil } diff --git a/pkg/skaffold/build/tag/date_time_test.go b/pkg/skaffold/build/tag/date_time_test.go index f2d2d59f506..093b17164bb 100644 --- a/pkg/skaffold/build/tag/date_time_test.go +++ b/pkg/skaffold/build/tag/date_time_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use 
this file except in compliance with the License. @@ -66,9 +66,7 @@ func TestDateTime_GenerateFullyQualifiedImageName(t *testing.T) { TimeZone: test.timezone, timeFn: func() time.Time { return test.buildTime }, } - tag, err := c.GenerateFullyQualifiedImageName(".", Options{ - ImageName: test.imageName, - }) + tag, err := c.GenerateFullyQualifiedImageName(".", test.imageName) testutil.CheckErrorAndDeepEqual(t, false, err, test.want, tag) }) diff --git a/pkg/skaffold/build/tag/env_template.go b/pkg/skaffold/build/tag/env_template.go index 306d7a160b7..f55f6003efa 100644 --- a/pkg/skaffold/build/tag/env_template.go +++ b/pkg/skaffold/build/tag/env_template.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" "github.com/pkg/errors" ) @@ -36,6 +37,7 @@ func NewEnvTemplateTagger(t string) (Tagger, error) { if err != nil { return nil, errors.Wrap(err, "parsing template") } + return &envTemplateTagger{ Template: tmpl, }, nil @@ -48,25 +50,30 @@ func (t *envTemplateTagger) Labels() map[string]string { } // GenerateFullyQualifiedImageName tags an image with the custom tag -func (t *envTemplateTagger) GenerateFullyQualifiedImageName(workingDir string, opts Options) (string, error) { - customMap := CreateEnvVarMap(opts.ImageName, opts.Digest) - return util.ExecuteEnvTemplate(t.Template, customMap) -} +func (t *envTemplateTagger) GenerateFullyQualifiedImageName(workingDir, imageName string) (string, error) { + tag, err := util.ExecuteEnvTemplate(t.Template, map[string]string{ + "IMAGE_NAME": imageName, + "DIGEST": "_DEPRECATED_DIGEST_", + "DIGEST_ALGO": "_DEPRECATED_DIGEST_ALGO_", + 
"DIGEST_HEX": "_DEPRECATED_DIGEST_HEX_", + }) + if err != nil { + return "", err + } -// CreateEnvVarMap creates a set of environment variables for use in Templates from the given -// image name and digest -func CreateEnvVarMap(imageName string, digest string) map[string]string { - customMap := map[string]string{} - customMap["IMAGE_NAME"] = imageName - customMap["DIGEST"] = digest - if digest != "" { - names := strings.SplitN(digest, ":", 2) - if len(names) >= 2 { - customMap["DIGEST_ALGO"] = names[0] - customMap["DIGEST_HEX"] = names[1] - } else { - customMap["DIGEST_HEX"] = digest + if strings.Contains(tag, "_DEPRECATED_DIGEST_") || + strings.Contains(tag, "_DEPRECATED_DIGEST_ALGO_") || + strings.Contains(tag, "_DEPRECATED_DIGEST_HEX_") { + warnings.Printf("{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags") + + switch { + case strings.HasSuffix(tag, "@_DEPRECATED_DIGEST_"): + tag = strings.TrimSuffix(tag, "@_DEPRECATED_DIGEST_") + + case strings.HasSuffix(tag, "@_DEPRECATED_DIGEST_ALGO_:_DEPRECATED_DIGEST_HEX_"): + tag = strings.TrimSuffix(tag, "@_DEPRECATED_DIGEST_ALGO_:_DEPRECATED_DIGEST_HEX_") } } - return customMap + + return tag, nil } diff --git a/pkg/skaffold/build/tag/env_template_test.go b/pkg/skaffold/build/tag/env_template_test.go index ef45c6e16c8..fc5819cf0e2 100644 --- a/pkg/skaffold/build/tag/env_template_test.go +++ b/pkg/skaffold/build/tag/env_template_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,31 +20,30 @@ import ( "testing" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" "github.com/GoogleContainerTools/skaffold/testutil" ) func TestEnvTemplateTagger_GenerateFullyQualifiedImageName(t *testing.T) { tests := []struct { - name string - template string - imageName string - digest string - env []string - expected string + name string + template string + imageName string + env []string + expected string + expectedWarnings []string }{ { name: "empty env", - template: "{{.IMAGE_NAME}}:{{.DIGEST}}", + template: "{{.IMAGE_NAME}}", imageName: "foo", - digest: "bar", - expected: "foo:bar", + expected: "foo", }, { name: "env", template: "{{.FOO}}-{{.BAZ}}:latest", env: []string{"FOO=BAR", "BAZ=BAT"}, imageName: "foo", - digest: "bar", expected: "BAR-BAT:latest", }, { @@ -52,15 +51,42 @@ func TestEnvTemplateTagger_GenerateFullyQualifiedImageName(t *testing.T) { template: "{{.IMAGE_NAME}}-{{.FROM_ENV}}:latest", env: []string{"FROM_ENV=FOO", "IMAGE_NAME=BAT"}, imageName: "image_name", - digest: "bar", expected: "image_name-FOO:latest", }, { - name: "digest algo hex", - template: "{{.IMAGE_NAME}}:{{.DIGEST_ALGO}}-{{.DIGEST_HEX}}", - imageName: "foo", - digest: "sha256:abcd", - expected: "foo:sha256-abcd", + name: "ignore @{{.DIGEST}} suffix", + template: "{{.IMAGE_NAME}}:tag@{{.DIGEST}}", + imageName: "foo", + expected: "foo:tag", + expectedWarnings: []string{"{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags"}, + }, + { + name: "ignore @{{.DIGEST_ALGO}}:{{.DIGEST_HEX}} suffix", + template: "{{.IMAGE_NAME}}:tag@{{.DIGEST_ALGO}}:{{.DIGEST_HEX}}", + imageName: "image_name", + expected: "image_name:tag", + expectedWarnings: []string{"{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags"}, + }, + { + name: "digest is deprecated", + 
template: "{{.IMAGE_NAME}}:{{.DIGEST}}", + imageName: "foo", + expected: "foo:_DEPRECATED_DIGEST_", + expectedWarnings: []string{"{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags"}, + }, + { + name: "digest algo is deprecated", + template: "{{.IMAGE_NAME}}:{{.DIGEST_ALGO}}", + imageName: "foo", + expected: "foo:_DEPRECATED_DIGEST_ALGO_", + expectedWarnings: []string{"{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags"}, + }, + { + name: "digest hex is deprecated", + template: "{{.IMAGE_NAME}}:{{.DIGEST_HEX}}", + imageName: "foo", + expected: "foo:_DEPRECATED_DIGEST_HEX_", + expectedWarnings: []string{"{{.DIGEST}}, {{.DIGEST_ALGO}} and {{.DIGEST_HEX}} are deprecated, image digest will now automatically be appended to image tags"}, }, } for _, test := range tests { @@ -69,15 +95,17 @@ func TestEnvTemplateTagger_GenerateFullyQualifiedImageName(t *testing.T) { return test.env } + defer func(w warnings.Warner) { warnings.Printf = w }(warnings.Printf) + fakeWarner := &warnings.Collect{} + warnings.Printf = fakeWarner.Warnf + c, err := NewEnvTemplateTagger(test.template) testutil.CheckError(t, false, err) - got, err := c.GenerateFullyQualifiedImageName("", Options{ - ImageName: test.imageName, - Digest: test.digest, - }) + got, err := c.GenerateFullyQualifiedImageName("", test.imageName) testutil.CheckErrorAndDeepEqual(t, false, err, test.expected, got) + testutil.CheckDeepEqual(t, test.expectedWarnings, fakeWarner.Warnings) }) } } diff --git a/pkg/skaffold/build/tag/git_commit.go b/pkg/skaffold/build/tag/git_commit.go index c7d1675cff2..d7b9b1e6954 100644 --- a/pkg/skaffold/build/tag/git_commit.go +++ b/pkg/skaffold/build/tag/git_commit.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this 
file except in compliance with the License. @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "os/exec" - "strings" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" @@ -39,10 +38,11 @@ func (c *GitCommit) Labels() map[string]string { } // GenerateFullyQualifiedImageName tags an image with the supplied image name and the git commit. -func (c *GitCommit) GenerateFullyQualifiedImageName(workingDir string, opts Options) (string, error) { +func (c *GitCommit) GenerateFullyQualifiedImageName(workingDir string, imageName string) (string, error) { hash, err := runGit(workingDir, "rev-parse", "--short", "HEAD") if err != nil { - return fallbackOnDigest(opts, err), nil + logrus.Warnln("Unable to find git commit:", err) + return fmt.Sprintf("%s:dirty", imageName), nil } changes, err := runGit(workingDir, "status", ".", "--porcelain") @@ -51,13 +51,16 @@ func (c *GitCommit) GenerateFullyQualifiedImageName(workingDir string, opts Opti } if len(changes) > 0 { - return dirtyTag(hash, opts), nil + return fmt.Sprintf("%s:%s-dirty", imageName, hash), nil } // Ignore error. It means there's no tag. 
tag, _ := runGit(workingDir, "describe", "--tags", "--exact-match") + if len(tag) > 0 { + return fmt.Sprintf("%s:%s", imageName, tag), nil + } - return commitOrTag(hash, tag, opts), nil + return fmt.Sprintf("%s:%s", imageName, hash), nil } func runGit(workingDir string, arg ...string) (string, error) { @@ -71,25 +74,3 @@ func runGit(workingDir string, arg ...string) (string, error) { return string(bytes.TrimSpace(out)), nil } - -func commitOrTag(currentTag string, tag string, opts Options) string { - if len(tag) > 0 { - currentTag = tag - } - - return fmt.Sprintf("%s:%s", opts.ImageName, currentTag) -} - -func shortDigest(opts Options) string { - return strings.TrimPrefix(opts.Digest, "sha256:")[0:7] -} - -func dirtyTag(currentTag string, opts Options) string { - return fmt.Sprintf("%s:%s-dirty-%s", opts.ImageName, currentTag, shortDigest(opts)) -} - -func fallbackOnDigest(opts Options, err error) string { - logrus.Warnln("Using digest instead of git commit:", err) - - return fmt.Sprintf("%s:dirty-%s", opts.ImageName, shortDigest(opts)) -} diff --git a/pkg/skaffold/build/tag/git_commit_test.go b/pkg/skaffold/build/tag/git_commit_test.go index 1a4cb56723c..46bd71b2383 100644 --- a/pkg/skaffold/build/tag/git_commit_test.go +++ b/pkg/skaffold/build/tag/git_commit_test.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -68,7 +68,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "dirty", - expectedName: "test:eefe1b9-dirty-abababa", + expectedName: "test:eefe1b9-dirty", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). 
@@ -79,7 +79,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "ignore tag when dirty", - expectedName: "test:eefe1b9-dirty-abababa", + expectedName: "test:eefe1b9-dirty", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). @@ -91,7 +91,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "untracked", - expectedName: "test:eefe1b9-dirty-abababa", + expectedName: "test:eefe1b9-dirty", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). @@ -116,7 +116,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "deleted file", - expectedName: "test:279d53f-dirty-abababa", + expectedName: "test:279d53f-dirty", createGitRepo: func(dir string) { gitInit(t, dir). write("source1.go", []byte("code1")). @@ -128,7 +128,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "rename", - expectedName: "test:eefe1b9-dirty-abababa", + expectedName: "test:eefe1b9-dirty", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). @@ -186,7 +186,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "updated artifact in dirty repo", - expectedName: "test:0c60cb8-dirty-abababa", + expectedName: "test:0c60cb8-dirty", createGitRepo: func(dir string) { gitInit(t, dir). mkdir("artifact1").write("artifact1/source.go", []byte("code")). 
@@ -199,14 +199,14 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { }, { description: "non git repo", - expectedName: "test:dirty-abababa", + expectedName: "test:dirty", createGitRepo: func(dir string) { ioutil.WriteFile(filepath.Join(dir, "source.go"), []byte("code"), os.ModePerm) }, }, { description: "git repo with no commit", - expectedName: "test:dirty-abababa", + expectedName: "test:dirty", createGitRepo: func(dir string) { gitInit(t, dir) }, @@ -223,10 +223,7 @@ func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { c := &GitCommit{} - name, err := c.GenerateFullyQualifiedImageName(workspace, Options{ - ImageName: "test", - Digest: "sha256:ababababababababababa", - }) + name, err := c.GenerateFullyQualifiedImageName(workspace, "test") testutil.CheckErrorAndDeepEqual(t, tt.shouldErr, err, tt.expectedName, name) }) diff --git a/pkg/skaffold/build/tag/sha256.go b/pkg/skaffold/build/tag/sha256.go index 153db9a6478..c0b4e4f4115 100644 --- a/pkg/skaffold/build/tag/sha256.go +++ b/pkg/skaffold/build/tag/sha256.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,8 @@ limitations under the License. 
package tag import ( - "fmt" - "strings" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" ) // ChecksumTagger tags an image by the sha256 of the image tarball @@ -33,13 +31,17 @@ func (c *ChecksumTagger) Labels() map[string]string { } } -// GenerateFullyQualifiedImageName tags an image with the supplied image name and the sha256 checksum of the image -func (c *ChecksumTagger) GenerateFullyQualifiedImageName(workingDir string, opts Options) (string, error) { - digest := opts.Digest - sha256 := strings.TrimPrefix(opts.Digest, "sha256:") - if sha256 == digest { - return "", fmt.Errorf("digest wrong format: %s, expected sha256:", digest) +func (c *ChecksumTagger) GenerateFullyQualifiedImageName(workingDir, imageName string) (string, error) { + parsed, err := docker.ParseReference(imageName) + if err != nil { + return "", err + } + + if parsed.Tag == "" { + // No supplied tag, so use "latest". + return imageName + ":latest", nil } - return fmt.Sprintf("%s:%s", opts.ImageName, sha256), nil + // They already have a tag. + return imageName, nil } diff --git a/pkg/skaffold/build/tag/sha256_test.go b/pkg/skaffold/build/tag/sha256_test.go index b82bd144cce..393105f74a4 100644 --- a/pkg/skaffold/build/tag/sha256_test.go +++ b/pkg/skaffold/build/tag/sha256_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,43 +23,23 @@ import ( ) func TestGenerateFullyQualifiedImageName(t *testing.T) { - var tests = []struct { - description string - imageName string - digest string - expected string - shouldErr bool - }{ - { - description: "no error", - imageName: "test", - digest: "sha256:12345abcde", - expected: "test:12345abcde", - }, - { - description: "wrong digest format", - imageName: "test", - digest: "wrong:digest:format", - shouldErr: true, - }, - { - description: "wrong digest format no colon", - imageName: "test", - digest: "sha256", - shouldErr: true, - }, - } + c := &ChecksumTagger{} - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - c := &ChecksumTagger{} + tag, err := c.GenerateFullyQualifiedImageName(".", "img:tag") + testutil.CheckErrorAndDeepEqual(t, false, err, "img:tag", tag) - tag, err := c.GenerateFullyQualifiedImageName(".", Options{ - ImageName: test.imageName, - Digest: test.digest, - }) + tag, err = c.GenerateFullyQualifiedImageName(".", "img") + testutil.CheckErrorAndDeepEqual(t, false, err, "img:latest", tag) - testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, tag) - }) - } + tag, err = c.GenerateFullyQualifiedImageName(".", "registry.example.com:8080/img:tag") + testutil.CheckErrorAndDeepEqual(t, false, err, "registry.example.com:8080/img:tag", tag) + + tag, err = c.GenerateFullyQualifiedImageName(".", "registry.example.com:8080/img") + testutil.CheckErrorAndDeepEqual(t, false, err, "registry.example.com:8080/img:latest", tag) + + tag, err = c.GenerateFullyQualifiedImageName(".", "registry.example.com/img") + testutil.CheckErrorAndDeepEqual(t, false, err, "registry.example.com/img:latest", tag) + + tag, err = c.GenerateFullyQualifiedImageName(".", "registry.example.com:8080:garbage") + testutil.CheckErrorAndDeepEqual(t, true, err, "", tag) } diff --git a/pkg/skaffold/build/tag/tag.go b/pkg/skaffold/build/tag/tag.go index 83420f751f5..5f77df5fa87 100644 --- a/pkg/skaffold/build/tag/tag.go +++ 
b/pkg/skaffold/build/tag/tag.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,14 +16,12 @@ limitations under the License. package tag +// ImageTags maps image names to tags +type ImageTags map[string]string + // Tagger is an interface for tag strategies to be implemented against type Tagger interface { Labels() map[string]string - GenerateFullyQualifiedImageName(workingDir string, tagOpts Options) (string, error) -} - -type Options struct { - ImageName string - Digest string + GenerateFullyQualifiedImageName(workingDir string, imageName string) (string, error) } diff --git a/pkg/skaffold/color/debug.test b/pkg/skaffold/color/debug.test deleted file mode 100755 index ed1a1052b8c..00000000000 Binary files a/pkg/skaffold/color/debug.test and /dev/null differ diff --git a/pkg/skaffold/color/formatter.go b/pkg/skaffold/color/formatter.go index 21f5a6f1a3a..478c5822d73 100644 --- a/pkg/skaffold/color/formatter.go +++ b/pkg/skaffold/color/formatter.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -56,6 +56,8 @@ var ( Purple = Color(35) // Cyan can format text to be displayed to the terminal in cyan, using ANSI escape codes. Cyan = Color(36) + // White can format text to be displayed to the terminal in white, using ANSI escape codes. + White = Color(37) // None uses ANSI escape codes to reset all formatting. None = Color(0) @@ -94,9 +96,31 @@ func (c Color) Fprintf(out io.Writer, format string, a ...interface{}) (n int, e return fmt.Fprintf(out, format, a...) } +// ColoredWriteCloser forces printing with colors to an io.WriteCloser. 
+type ColoredWriteCloser struct { + io.WriteCloser +} + +// ColoredWriter forces printing with colors to an io.Writer. +type ColoredWriter struct { + io.Writer +} + +// OverwriteDefault overwrites default color +func OverwriteDefault(color Color) { + Default = color +} + // This implementation comes from logrus (https://github.com/sirupsen/logrus/blob/master/terminal_check_notappengine.go), // unfortunately logrus doesn't expose a public interface we can use to call it. func isTerminal(w io.Writer) bool { + if _, ok := w.(ColoredWriteCloser); ok { + return true + } + if _, ok := w.(ColoredWriter); ok { + return true + } + switch v := w.(type) { case *os.File: return terminal.IsTerminal(int(v.Fd())) diff --git a/pkg/skaffold/color/formatter_test.go b/pkg/skaffold/color/formatter_test.go index 90bf371476d..0219a2990db 100644 --- a/pkg/skaffold/color/formatter_test.go +++ b/pkg/skaffold/color/formatter_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,6 +20,8 @@ import ( "bytes" "io" "testing" + + "github.com/GoogleContainerTools/skaffold/testutil" ) func compareText(t *testing.T, expected, actual string, expectedN int, actualN int, err error) { @@ -85,3 +87,9 @@ func TestFprintfNoTTY(t *testing.T) { expected := "It's been 1 week" compareText(t, expected, b.String(), 16, n, err) } + +func TestOverwriteDefault(t *testing.T) { + testutil.CheckDeepEqual(t, Blue, Default) + OverwriteDefault(Red) + testutil.CheckDeepEqual(t, Red, Default) +} diff --git a/pkg/skaffold/config/options.go b/pkg/skaffold/config/options.go index 6a9cb87dbea..45036928d88 100644 --- a/pkg/skaffold/config/options.go +++ b/pkg/skaffold/config/options.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,16 @@ limitations under the License. package config import ( + "io" "strings" ) +// Output defines which zones on the screen to print to +type Output struct { + Main io.Writer + Logs io.Writer +} + // SkaffoldOptions are options that are set by command line arguments not included // in the config file itself type SkaffoldOptions struct { @@ -30,15 +37,17 @@ type SkaffoldOptions struct { TailDev bool PortForward bool SkipTests bool + ExperimentalGUI bool Profiles []string CustomTag string Namespace string - Watch []string + TargetImages []string Trigger string CustomLabels []string WatchPollInterval int DefaultRepo string PreBuiltImages []string + Command string } // Labels returns a map of labels to be applied to all deployed diff --git a/pkg/skaffold/config/options_test.go b/pkg/skaffold/config/options_test.go index 717423adf9c..5539cc73206 100644 --- a/pkg/skaffold/config/options_test.go +++ b/pkg/skaffold/config/options_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, 
Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/constants/constants.go b/pkg/skaffold/constants/constants.go index 9fc8646f7f7..6ea31de243f 100644 --- a/pkg/skaffold/constants/constants.go +++ b/pkg/skaffold/constants/constants.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "fmt" "runtime" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/sirupsen/logrus" ) @@ -31,9 +32,6 @@ const ( // context directory DefaultDockerfilePath = "Dockerfile" - DefaultDevTagStrategy = TagStrategySha256 - DefaultRunTagStrategy = TagStrategyGitCommit - // TagStrategySha256 uses the checksum of the built artifact as the tag TagStrategySha256 = "sha256" TagStrategyGitCommit = "gitCommit" @@ -47,21 +45,35 @@ const ( DefaultKustomizationPath = "." 
- DefaultKanikoImage = "gcr.io/kaniko-project/executor:v0.7.0@sha256:0b4e0812aa17c54a9b8d8c8d7cb35559a892a341650acf7cb428c3e8cb4a3919" - DefaultKanikoSecretName = "kaniko-secret" - DefaultKanikoTimeout = "20m" - DefaultKanikoContainerName = "kaniko" - DefaultKanikoEmptyDirName = "kaniko-emptydir" - DefaultKanikoEmptyDirMountPath = "/kaniko/buildcontext" + DefaultKanikoImage = "gcr.io/kaniko-project/executor:v0.8.0@sha256:32ed8afc3c808d7159a7c1789d46c2abe95c1cb5b7afdd6867e360f0ed952c13" + DefaultKanikoSecretName = "kaniko-secret" + DefaultKanikoTimeout = "20m" + DefaultKanikoContainerName = "kaniko" + DefaultKanikoEmptyDirName = "kaniko-emptydir" + DefaultKanikoEmptyDirMountPath = "/kaniko/buildcontext" + DefaultKanikoDockerConfigSecretName = "docker-cfg" + DefaultKanikoDockerConfigPath = "/kaniko/.docker" DefaultBusyboxImage = "busybox" UpdateCheckEnvironmentVariable = "SKAFFOLD_UPDATE_CHECK" DefaultCloudBuildDockerImage = "gcr.io/cloud-builders/docker" + DefaultCloudBuildMavenImage = "gcr.io/cloud-builders/mvn" + DefaultCloudBuildGradleImage = "gcr.io/cloud-builders/gradle" // A regex matching valid repository names (https://github.com/docker/distribution/blob/master/reference/reference.go) RepositoryComponentRegex string = `^[a-z\d]+(?:(?:[_.]|__|-+)[a-z\d]+)*$` + + SkaffoldPluginKey = "SKAFFOLD_PLUGIN_KEY" + SkaffoldPluginValue = "1337" + SkaffoldPluginName = "SKAFFOLD_PLUGIN_NAME" + DockerBuilderPluginName = "docker" +) + +var ( + GoogleCloudBuild latest.ExecEnvironment = "googleCloudBuild" + Local latest.ExecEnvironment = "local" ) var DefaultKubectlManifests = []string{"k8s/*.yaml"} diff --git a/pkg/skaffold/deploy/deploy.go b/pkg/skaffold/deploy/deploy.go index b02be4494da..bad7c4cf644 100644 --- a/pkg/skaffold/deploy/deploy.go +++ b/pkg/skaffold/deploy/deploy.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 
compliance with the License. @@ -21,16 +21,8 @@ import ( "io" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" - - "k8s.io/apimachinery/pkg/runtime" ) -// Artifact contains all information about a completed deployment -type Artifact struct { - Obj runtime.Object - Namespace string -} - // Deployer is the Deploy API of skaffold and responsible for deploying // the build results to a Kubernetes cluster type Deployer interface { @@ -38,7 +30,7 @@ type Deployer interface { // Deploy should ensure that the build results are deployed to the Kubernetes // cluster. - Deploy(context.Context, io.Writer, []build.Artifact) ([]Artifact, error) + Deploy(context.Context, io.Writer, []build.Artifact, []Labeller) error // Dependencies returns a list of files that the deployer depends on. // In dev mode, a redeploy will be triggered diff --git a/pkg/skaffold/deploy/helm.go b/pkg/skaffold/deploy/helm.go index 07b833beafa..239f6a26dc0 100644 --- a/pkg/skaffold/deploy/helm.go +++ b/pkg/skaffold/deploy/helm.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,7 +30,6 @@ import ( "strings" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" @@ -66,17 +65,23 @@ func (h *HelmDeployer) Labels() map[string]string { } } -func (h *HelmDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact) ([]Artifact, error) { - deployResults := []Artifact{} +func (h *HelmDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact, labellers []Labeller) error { + var dRes []Artifact + + labels := merge(labellers...) 
+ for _, r := range h.Releases { results, err := h.deployRelease(ctx, out, r, builds) if err != nil { releaseName, _ := evaluateReleaseName(r.Name) - return deployResults, errors.Wrapf(err, "deploying %s", releaseName) + return errors.Wrapf(err, "deploying %s", releaseName) } - deployResults = append(deployResults, results...) + + dRes = append(dRes, results...) } - return deployResults, nil + + labelDeployResults(labels, dRes) + return nil } func (h *HelmDeployer) Dependencies() ([]string, error) { @@ -156,7 +161,7 @@ func (h *HelmDeployer) deployRelease(ctx context.Context, out io.Writer, r lates // Dependency builds should be skipped when trying to install a chart // with local dependencies in the chart folder, e.g. the istio helm chart. // This decision is left to the user. - if !r.SkipDependencyBuild { + if !r.SkipBuildDependencies { // First build dependencies. logrus.Infof("Building helm dependencies...") if err := h.helm(ctx, out, "dep", "build", r.ChartPath); err != nil { @@ -236,7 +241,7 @@ func (h *HelmDeployer) deployRelease(ctx context.Context, out io.Writer, r lates if idx > 0 { suffix = strconv.Itoa(idx + 1) } - m := tag.CreateEnvVarMap(b.ImageName, extractTag(b.Tag)) + m := createEnvVarMap(b.ImageName, extractTag(b.Tag)) for k, v := range m { envMap[k+suffix] = v } @@ -267,6 +272,22 @@ func (h *HelmDeployer) deployRelease(ctx context.Context, out io.Writer, r lates return h.getDeployResults(ctx, ns, releaseName), helmErr } +func createEnvVarMap(imageName string, digest string) map[string]string { + customMap := map[string]string{} + customMap["IMAGE_NAME"] = imageName + customMap["DIGEST"] = digest + if digest != "" { + names := strings.SplitN(digest, ":", 2) + if len(names) >= 2 { + customMap["DIGEST_ALGO"] = names[0] + customMap["DIGEST_HEX"] = names[1] + } else { + customMap["DIGEST_HEX"] = digest + } + } + return customMap +} + // imageName if the given string includes a fully qualified docker image name then lets trim just the tag part out func 
extractTag(imageName string) string { idx := strings.LastIndex(imageName, "/") diff --git a/pkg/skaffold/deploy/helm_test.go b/pkg/skaffold/deploy/helm_test.go index ef08ebe65fa..2fc007bb152 100644 --- a/pkg/skaffold/deploy/helm_test.go +++ b/pkg/skaffold/deploy/helm_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -174,6 +174,26 @@ var testDeployWithTemplatedName = &latest.HelmDeploy{ }, } +var testDeploySkipBuildDependencies = &latest.HelmDeploy{ + Releases: []latest.HelmRelease{ + { + Name: "skaffold-helm", + ChartPath: "stable/chartmuseum", + SkipBuildDependencies: true, + }, + }, +} + +var testDeployRemoteChart = &latest.HelmDeploy{ + Releases: []latest.HelmRelease{ + { + Name: "skaffold-helm-remote", + ChartPath: "stable/chartmuseum", + SkipBuildDependencies: false, + }, + }, +} + var testNamespace = "testNamespace" var validDeployYaml = ` @@ -293,6 +313,22 @@ func TestHelmDeploy(t *testing.T) { builds: testBuilds, shouldErr: true, }, + { + description: "deploy success remote chart with skipBuildDependencies", + cmd: &MockHelm{t: t}, + deployer: NewHelmDeployer(testDeploySkipBuildDependencies, testKubeContext, testNamespace, ""), + builds: testBuilds, + }, + { + description: "deploy error remote chart without skipBuildDependencies", + cmd: &MockHelm{ + t: t, + depResult: fmt.Errorf("unexpected error"), + }, + deployer: NewHelmDeployer(testDeployRemoteChart, testKubeContext, testNamespace, ""), + builds: testBuilds, + shouldErr: true, + }, { description: "get failure should install not upgrade", cmd: &MockHelm{ @@ -408,7 +444,7 @@ func TestHelmDeploy(t *testing.T) { defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) util.DefaultExecCommand = tt.cmd - _, err := tt.deployer.Deploy(context.Background(), ioutil.Discard, tt.builds) + err := 
tt.deployer.Deploy(context.Background(), ioutil.Discard, tt.builds, nil) testutil.CheckError(t, tt.shouldErr, err) }) diff --git a/pkg/skaffold/deploy/kubectl.go b/pkg/skaffold/deploy/kubectl.go index cf614ea6bb8..00b2e099171 100644 --- a/pkg/skaffold/deploy/kubectl.go +++ b/pkg/skaffold/deploy/kubectl.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,12 +17,8 @@ limitations under the License. package deploy import ( - "bufio" - "bytes" "context" "io" - "io/ioutil" - "strings" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" @@ -66,32 +62,32 @@ func (k *KubectlDeployer) Labels() map[string]string { // Deploy templates the provided manifests with a simple `find and replace` and // runs `kubectl apply` on those manifests -func (k *KubectlDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact) ([]Artifact, error) { - color.Default.Fprintln(out, "kubectl client version:", k.kubectl.Version()) - if err := k.kubectl.CheckVersion(); err != nil { +func (k *KubectlDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact, labellers []Labeller) error { + color.Default.Fprintln(out, "kubectl client version:", k.kubectl.Version(ctx)) + if err := k.kubectl.CheckVersion(ctx); err != nil { color.Default.Fprintln(out, err) } manifests, err := k.readManifests(ctx) if err != nil { - return nil, errors.Wrap(err, "reading manifests") + return errors.Wrap(err, "reading manifests") } if len(manifests) == 0 { - return nil, nil + return nil } manifests, err = manifests.ReplaceImages(builds, k.defaultRepo) if err != nil { - return nil, errors.Wrap(err, "replacing images in manifests") + return errors.Wrap(err, "replacing images in manifests") } - updated, err := k.kubectl.Apply(ctx, out, 
manifests) + manifests, err = manifests.SetLabels(merge(labellers...)) if err != nil { - return nil, errors.Wrap(err, "apply") + return errors.Wrap(err, "setting labels in manifests") } - return parseManifestsForDeploys(k.kubectl.Namespace, updated) + return k.kubectl.Apply(ctx, out, manifests) } // Cleanup deletes what was deployed by calling Deploy. @@ -133,71 +129,16 @@ func (k *KubectlDeployer) manifestFiles(manifests []string) ([]string, error) { return filteredManifests, nil } -func parseManifestsForDeploys(namespace string, manifests kubectl.ManifestList) ([]Artifact, error) { - var results []Artifact - - for _, manifest := range manifests { - b := bufio.NewReader(bytes.NewReader(manifest)) - results = append(results, parseReleaseInfo(namespace, b)...) - } - - return results, nil -} - // readManifests reads the manifests to deploy/delete. func (k *KubectlDeployer) readManifests(ctx context.Context) (kubectl.ManifestList, error) { - files, err := k.manifestFiles(k.Manifests) + manifests, err := k.Dependencies() if err != nil { - return nil, errors.Wrap(err, "expanding user manifest list") + return nil, errors.Wrap(err, "listing manifests") } - var manifests kubectl.ManifestList - for _, manifest := range files { - buf, err := ioutil.ReadFile(manifest) - if err != nil { - return nil, errors.Wrap(err, "reading manifest") - } - - manifests.Append(buf) - } - - for _, manifest := range k.Manifests { - if util.IsURL(manifest) { - buf, err := util.Download(manifest) - if err != nil { - return nil, errors.Wrap(err, "downloading manifest") - } - manifests.Append(buf) - } - } - - for _, m := range k.RemoteManifests { - manifest, err := k.readRemoteManifest(ctx, m) - if err != nil { - return nil, errors.Wrap(err, "get remote manifests") - } - - manifests = append(manifests, manifest) - } - - logrus.Debugln("manifests", manifests.String()) - - return manifests, nil -} - -func (k *KubectlDeployer) readRemoteManifest(ctx context.Context, name string) ([]byte, error) { - 
var args []string - if parts := strings.Split(name, ":"); len(parts) > 1 { - args = append(args, "--namespace", parts[0]) - name = parts[1] - } - args = append(args, name, "-o", "yaml") - - var manifest bytes.Buffer - err := k.kubectl.Run(ctx, nil, &manifest, "get", nil, args...) - if err != nil { - return nil, errors.Wrap(err, "getting manifest") + if len(manifests) == 0 { + return kubectl.ManifestList{}, nil } - return manifest.Bytes(), nil + return k.kubectl.ReadManifests(ctx, manifests) } diff --git a/pkg/skaffold/deploy/kubectl/cli.go b/pkg/skaffold/deploy/kubectl/cli.go index 4980e12dcfb..17903a5ff06 100644 --- a/pkg/skaffold/deploy/kubectl/cli.go +++ b/pkg/skaffold/deploy/kubectl/cli.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,26 +49,59 @@ func (c *CLI) Delete(ctx context.Context, out io.Writer, manifests ManifestList) } // Apply runs `kubectl apply` on a list of manifests. -func (c *CLI) Apply(ctx context.Context, out io.Writer, manifests ManifestList) (ManifestList, error) { +func (c *CLI) Apply(ctx context.Context, out io.Writer, manifests ManifestList) error { // Only redeploy modified or new manifests // TODO(dgageot): should we delete a manifest that was deployed and is not anymore? updated := c.previousApply.Diff(manifests) logrus.Debugln(len(manifests), "manifests to deploy.", len(updated), "are updated or new") c.previousApply = manifests if len(updated) == 0 { - return nil, nil + return nil } // Add --force flag to delete and redeploy image if changes can't be applied if err := c.Run(ctx, updated.Reader(), out, "apply", c.Flags.Apply, "--force", "-f", "-"); err != nil { - return nil, errors.Wrap(err, "kubectl apply") + return errors.Wrap(err, "kubectl apply") } - return updated, nil + return nil +} + +// ReadManifests reads a list of manifests in yaml format. 
+func (c *CLI) ReadManifests(ctx context.Context, manifests []string) (ManifestList, error) { + var list []string + for _, manifest := range manifests { + list = append(list, "-f", manifest) + } + + args := c.args("create", []string{"--dry-run", "-oyaml"}, list...) + + cmd := exec.CommandContext(ctx, "kubectl", args...) + buf, err := util.RunCmdOut(cmd) + if err != nil { + return nil, errors.Wrap(err, "kubectl create") + } + + var manifestList ManifestList + manifestList.Append(buf) + logrus.Debugln("manifests", manifestList.String()) + + return manifestList, nil } // Run shells out kubectl CLI. func (c *CLI) Run(ctx context.Context, in io.Reader, out io.Writer, command string, commandFlags []string, arg ...string) error { + args := c.args(command, commandFlags, arg...) + + cmd := exec.CommandContext(ctx, "kubectl", args...) + cmd.Stdin = in + cmd.Stdout = out + cmd.Stderr = out + + return util.RunCmd(cmd) +} + +func (c *CLI) args(command string, commandFlags []string, arg ...string) []string { args := []string{"--context", c.KubeContext} if c.Namespace != "" { args = append(args, "--namespace", c.Namespace) @@ -78,10 +111,5 @@ func (c *CLI) Run(ctx context.Context, in io.Reader, out io.Writer, command stri args = append(args, commandFlags...) args = append(args, arg...) - cmd := exec.CommandContext(ctx, "kubectl", args...) - cmd.Stdin = in - cmd.Stdout = out - cmd.Stderr = out - - return util.RunCmd(cmd) + return args } diff --git a/pkg/skaffold/deploy/kubectl/images.go b/pkg/skaffold/deploy/kubectl/images.go index b3302c0e977..3b814641791 100644 --- a/pkg/skaffold/deploy/kubectl/images.go +++ b/pkg/skaffold/deploy/kubectl/images.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -23,11 +23,9 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" ) -// for testing -var warner Warner = &logrusWarner{} - // ReplaceImages replaces image names in a list of manifests. func (l *ManifestList) ReplaceImages(builds []build.Artifact, defaultRepo string) (ManifestList, error) { replacer := newImageReplacer(builds, defaultRepo) @@ -87,7 +85,7 @@ func (r *imageReplacer) NewValue(old interface{}) (bool, interface{}) { func (r *imageReplacer) parseAndReplace(image string) (bool, interface{}) { parsed, err := docker.ParseReference(image) if err != nil { - warner.Warnf("Couldn't parse image: %s", image) + warnings.Printf("Couldn't parse image: %s", image) return false, nil } @@ -107,7 +105,7 @@ func (r *imageReplacer) parseAndReplace(image string) (bool, interface{}) { func (r *imageReplacer) Check() { for imageName := range r.tagsByImageName { if !r.found[imageName] { - warner.Warnf("image [%s] is not used by the deployment", imageName) + warnings.Printf("image [%s] is not used by the deployment", imageName) } } } diff --git a/pkg/skaffold/deploy/kubectl/images_test.go b/pkg/skaffold/deploy/kubectl/images_test.go index 5c0a290a0e8..d82cc30730c 100644 --- a/pkg/skaffold/deploy/kubectl/images_test.go +++ b/pkg/skaffold/deploy/kubectl/images_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,23 +17,13 @@ limitations under the License. 
package kubectl import ( - "fmt" - "sort" "testing" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/warnings" "github.com/GoogleContainerTools/skaffold/testutil" ) -type fakeWarner struct { - warnings []string -} - -func (l *fakeWarner) Warnf(format string, args ...interface{}) { - l.warnings = append(l.warnings, fmt.Sprintf(format, args...)) - sort.Strings(l.warnings) -} - func TestReplaceImages(t *testing.T) { manifests := ManifestList{[]byte(` apiVersion: v1 @@ -96,9 +86,9 @@ spec: - image: in valid `)} - defer func(w Warner) { warner = w }(warner) - fakeWarner := &fakeWarner{} - warner = fakeWarner + defer func(w warnings.Warner) { warnings.Printf = w }(warnings.Printf) + fakeWarner := &warnings.Collect{} + warnings.Printf = fakeWarner.Warnf resultManifest, err := manifests.ReplaceImages(builds, "") @@ -107,7 +97,7 @@ spec: "Couldn't parse image: in valid", "image [skaffold/unused] is not used by the deployment", "image [skaffold/usedwrongfqn] is not used by the deployment", - }, fakeWarner.warnings) + }, fakeWarner.Warnings) } func TestReplaceEmptyManifest(t *testing.T) { diff --git a/pkg/skaffold/deploy/kubectl/labels.go b/pkg/skaffold/deploy/kubectl/labels.go new file mode 100644 index 00000000000..17bcf387547 --- /dev/null +++ b/pkg/skaffold/deploy/kubectl/labels.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SetLabels add labels to a list of Kubernetes manifests. +func (l *ManifestList) SetLabels(labels map[string]string) (ManifestList, error) { + replacer := newLabelsSetter(labels) + + updated, err := l.Visit(replacer) + if err != nil { + return nil, errors.Wrap(err, "setting labels") + } + + logrus.Debugln("manifests with labels", updated.String()) + + return updated, nil +} + +type labelsSetter struct { + labels map[string]string +} + +func newLabelsSetter(labels map[string]string) *labelsSetter { + return &labelsSetter{ + labels: labels, + } +} + +func (r *labelsSetter) Matches(key string) bool { + return "metadata" == key +} + +func (r *labelsSetter) NewValue(old interface{}) (bool, interface{}) { + if len(r.labels) == 0 { + return false, nil + } + + metadata, ok := old.(map[interface{}]interface{}) + if !ok { + return false, nil + } + + l, present := metadata["labels"] + if !present { + metadata["labels"] = r.labels + return true, metadata + } + + labels, ok := l.(map[interface{}]interface{}) + if !ok { + return false, nil + } + + for k, v := range r.labels { + labels[k] = v + } + + return true, metadata +} diff --git a/pkg/skaffold/deploy/kubectl/labels_test.go b/pkg/skaffold/deploy/kubectl/labels_test.go new file mode 100644 index 00000000000..4d2ca28e75a --- /dev/null +++ b/pkg/skaffold/deploy/kubectl/labels_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestSetLabels(t *testing.T) { + manifests := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + expected := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + labels: + key1: value1 + key2: value2 + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + resultManifest, err := manifests.SetLabels(map[string]string{ + "key1": "value1", + "key2": "value2", + }) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected.String(), resultManifest.String()) +} + +func TestAddLabels(t *testing.T) { + manifests := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + labels: + key0: value0 + key1: ignored + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + expected := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + labels: + key0: value0 + key1: value1 + key2: value2 + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + resultManifest, err := manifests.SetLabels(map[string]string{ + "key1": "value1", + "key2": "value2", + }) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected.String(), resultManifest.String()) +} + +func TestSetNoLabel(t *testing.T) { + manifests := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + expected := ManifestList{[]byte(` +apiVersion: v1 +kind: Pod +metadata: + name: getting-started +spec: + containers: + - image: gcr.io/k8s-skaffold/example + name: example +`)} + + resultManifest, err := 
manifests.SetLabels(nil) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected.String(), resultManifest.String()) +} diff --git a/pkg/skaffold/deploy/kubectl/manifests.go b/pkg/skaffold/deploy/kubectl/manifests.go index 83bebb45099..c2b1de22ba3 100644 --- a/pkg/skaffold/deploy/kubectl/manifests.go +++ b/pkg/skaffold/deploy/kubectl/manifests.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ package kubectl import ( "bytes" "io" + "regexp" "strings" ) @@ -38,7 +39,13 @@ func (l *ManifestList) String() string { // Append appends the yaml manifests defined in the given buffer. func (l *ManifestList) Append(buf []byte) { - parts := bytes.Split(buf, []byte("\n---")) + // `kubectl create --dry-run -oyaml` outputs manifests without --- separator + // But we can rely on `apiVersion:` being here as a "separator". + buf = regexp. + MustCompile("\n(|---\n)apiVersion: "). + ReplaceAll(buf, []byte("\n---\napiVersion: ")) + + parts := bytes.Split(buf, []byte("\n---\n")) for _, part := range parts { *l = append(*l, part) } diff --git a/pkg/skaffold/deploy/kubectl/manifests_test.go b/pkg/skaffold/deploy/kubectl/manifests_test.go new file mode 100644 index 00000000000..df8a9c87a8d --- /dev/null +++ b/pkg/skaffold/deploy/kubectl/manifests_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/testutil" +) + +const pod1 = `apiVersion: v1 +kind: Pod +metadata: + name: leeroy-web +spec: + containers: + - name: leeroy-web + image: leeroy-web` + +const pod2 = `apiVersion: v1 +kind: Pod +metadata: + name: leeroy-app +spec: + containers: + - name: leeroy-app + image: leeroy-app` + +func TestAppend(t *testing.T) { + var manifests ManifestList + + manifests.Append([]byte(pod1 + "\n---\n" + pod2)) + + testutil.CheckDeepEqual(t, 2, len(manifests)) + testutil.CheckDeepEqual(t, pod1, string(manifests[0])) + testutil.CheckDeepEqual(t, pod2, string(manifests[1])) +} + +func TestAppendWithoutSeperator(t *testing.T) { + var manifests ManifestList + + manifests.Append([]byte(pod1 + "\n" + pod2)) + + testutil.CheckDeepEqual(t, 2, len(manifests)) + testutil.CheckDeepEqual(t, pod1, string(manifests[0])) + testutil.CheckDeepEqual(t, pod2, string(manifests[1])) +} diff --git a/pkg/skaffold/deploy/kubectl/version.go b/pkg/skaffold/deploy/kubectl/version.go index 3a2f73814b4..8964fc4a703 100644 --- a/pkg/skaffold/deploy/kubectl/version.go +++ b/pkg/skaffold/deploy/kubectl/version.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -49,19 +49,19 @@ func (v ClientVersion) String() string { } // CheckVersion warns the user if their kubectl version is < 1.12.0 -func (c *CLI) CheckVersion() error { - m, err := strconv.Atoi(c.Version().Minor) +func (c *CLI) CheckVersion(ctx context.Context) error { + m, err := strconv.Atoi(c.Version(ctx).Minor) if err != nil { return errors.Wrap(err, "couldn't get kubectl minor version") } if m < 12 { - return errors.New("kubectl version 1.12.0 or greater is recommended for use with skaffold") + return errors.New("kubectl version 1.12.0 or greater is recommended for use with Skaffold") } return nil } // Version returns the client version of kubectl. -func (c *CLI) Version() ClientVersion { +func (c *CLI) Version(ctx context.Context) ClientVersion { c.versionOnce.Do(func() { version := Version{ Client: ClientVersion{ @@ -70,7 +70,7 @@ func (c *CLI) Version() ClientVersion { }, } - buf, err := c.getVersion(context.Background()) + buf, err := c.getVersion(ctx) if err != nil { logrus.Warnln("unable to get kubectl client version", err) } else if err := json.Unmarshal(buf, &version); err != nil { diff --git a/pkg/skaffold/deploy/kubectl/visitor.go b/pkg/skaffold/deploy/kubectl/visitor.go index 1109b31f27f..6d56e970c3e 100644 --- a/pkg/skaffold/deploy/kubectl/visitor.go +++ b/pkg/skaffold/deploy/kubectl/visitor.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/deploy/kubectl_test.go b/pkg/skaffold/deploy/kubectl_test.go index 210b6cf5a06..e88c4bde61a 100644 --- a/pkg/skaffold/deploy/kubectl_test.go +++ b/pkg/skaffold/deploy/kubectl_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,7 +29,10 @@ import ( "github.com/pkg/errors" ) -const testKubeContext = "kubecontext" +const ( + testKubeContext = "kubecontext" + kubectlVersion = `{"clientVersion":{"major":"1","minor":"12"}}` +) const deploymentWebYAML = `apiVersion: v1 kind: Pod @@ -40,7 +43,7 @@ spec: - name: leeroy-web image: leeroy-web` -const deploymentAppYaml = `apiVersion: v1 +const deploymentAppYAML = `apiVersion: v1 kind: Pod metadata: name: leeroy-app @@ -50,6 +53,12 @@ spec: image: leeroy-app` func TestKubectlDeploy(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() + + tmpDir.Write("deployment.yaml", deploymentWebYAML) + tmpDir.Write("empty.ignored", "") + var tests = []struct { description string cfg *latest.KubectlDeploy @@ -58,30 +67,23 @@ func TestKubectlDeploy(t *testing.T) { shouldErr bool }{ { - description: "parameter mismatch", - shouldErr: true, - cfg: &latest.KubectlDeploy{ - Manifests: []string{"deployment.yaml"}, - }, - builds: []build.Artifact{ - { - ImageName: "leeroy-web", - Tag: "leeroy-web:v1", - }, - }, + description: "no manifest", + cfg: &latest.KubectlDeploy{}, + command: testutil.NewFakeCmd(t).WithRunOut("kubectl version --client -ojson", kubectlVersion), }, { description: "missing manifest file", - shouldErr: true, cfg: &latest.KubectlDeploy{ - Manifests: []string{"deployment.yaml"}, + Manifests: []string{"missing.yaml"}, }, - builds: []build.Artifact{ - { - ImageName: "leeroy-web", - Tag: "leeroy-web:123", - }, + command: testutil.NewFakeCmd(t).WithRunOut("kubectl version --client -ojson", kubectlVersion), + }, + { + description: "ignore non-manifest", + cfg: &latest.KubectlDeploy{ + Manifests: []string{"*.ignored"}, }, + command: testutil.NewFakeCmd(t).WithRunOut("kubectl version --client -ojson", kubectlVersion), }, { description: "deploy success", @@ -89,34 +91,31 @@ func TestKubectlDeploy(t *testing.T) { Manifests: []string{"deployment.yaml"}, }, command: testutil.NewFakeCmd(t). 
- WithRunOut("kubectl version --client -ojson", "1.12"). + WithRunOut("kubectl version --client -ojson", kubectlVersion). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). WithRun("kubectl --context kubecontext --namespace testNamespace apply --force -f -"), - builds: []build.Artifact{ - { - ImageName: "leeroy-web", - Tag: "leeroy-web:123", - }, - }, + builds: []build.Artifact{{ + ImageName: "leeroy-web", + Tag: "leeroy-web:123", + }}, }, { description: "deploy command error", - shouldErr: true, cfg: &latest.KubectlDeploy{ Manifests: []string{"deployment.yaml"}, }, command: testutil.NewFakeCmd(t). - WithRunOut("kubectl version --client -ojson", "1.12"). + WithRunOut("kubectl version --client -ojson", kubectlVersion). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). WithRunErr("kubectl --context kubecontext --namespace testNamespace apply --force -f -", fmt.Errorf("")), - builds: []build.Artifact{ - { - ImageName: "leeroy-web", - Tag: "leeroy-web:123", - }, - }, + builds: []build.Artifact{{ + ImageName: "leeroy-web", + Tag: "leeroy-web:123", + }}, + shouldErr: true, }, { description: "additional flags", - shouldErr: true, cfg: &latest.KubectlDeploy{ Manifests: []string{"deployment.yaml"}, Flags: latest.KubectlFlags{ @@ -126,31 +125,24 @@ func TestKubectlDeploy(t *testing.T) { }, }, command: testutil.NewFakeCmd(t). - WithRunOut("kubectl version --client -ojson", "1.12"). + WithRunOut("kubectl version --client -ojson", kubectlVersion). + WithRunOut("kubectl --context kubecontext --namespace testNamespace -v=0 create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). 
WithRunErr("kubectl --context kubecontext --namespace testNamespace -v=0 apply --overwrite=true --force -f -", fmt.Errorf("")), - builds: []build.Artifact{ - { - ImageName: "leeroy-web", - Tag: "leeroy-web:123", - }, - }, + builds: []build.Artifact{{ + ImageName: "leeroy-web", + Tag: "leeroy-web:123", + }}, + shouldErr: true, }, } - tmpDir, cleanup := testutil.NewTempDir(t) - defer cleanup() - - tmpDir.Write("deployment.yaml", deploymentWebYAML) - for _, test := range tests { t.Run(test.description, func(t *testing.T) { - if test.command != nil { - defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) - util.DefaultExecCommand = test.command - } + defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) + util.DefaultExecCommand = test.command k := NewKubectlDeployer(tmpDir.Root(), test.cfg, testKubeContext, testNamespace, "") - _, err := k.Deploy(context.Background(), ioutil.Discard, test.builds) + err := k.Deploy(context.Background(), ioutil.Discard, test.builds, nil) testutil.CheckError(t, test.shouldErr, err) }) @@ -158,6 +150,11 @@ func TestKubectlDeploy(t *testing.T) { } func TestKubectlCleanup(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() + + tmpDir.Write("deployment.yaml", deploymentWebYAML) + var tests = []struct { description string cfg *latest.KubectlDeploy @@ -169,14 +166,18 @@ func TestKubectlCleanup(t *testing.T) { cfg: &latest.KubectlDeploy{ Manifests: []string{"deployment.yaml"}, }, - command: testutil.NewFakeCmd(t).WithRun("kubectl --context kubecontext --namespace testNamespace delete --ignore-not-found=true -f -"), + command: testutil.NewFakeCmd(t). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). 
+ WithRun("kubectl --context kubecontext --namespace testNamespace delete --ignore-not-found=true -f -"), }, { description: "cleanup error", cfg: &latest.KubectlDeploy{ Manifests: []string{"deployment.yaml"}, }, - command: testutil.NewFakeCmd(t).WithRunErr("kubectl --context kubecontext --namespace testNamespace delete --ignore-not-found=true -f -", errors.New("BUG")), + command: testutil.NewFakeCmd(t). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). + WithRunErr("kubectl --context kubecontext --namespace testNamespace delete --ignore-not-found=true -f -", errors.New("BUG")), shouldErr: true, }, { @@ -189,21 +190,16 @@ func TestKubectlCleanup(t *testing.T) { Delete: []string{"--grace-period=1"}, }, }, - command: testutil.NewFakeCmd(t).WithRun("kubectl --context kubecontext --namespace testNamespace -v=0 delete --grace-period=1 --ignore-not-found=true -f -"), + command: testutil.NewFakeCmd(t). + WithRunOut("kubectl --context kubecontext --namespace testNamespace -v=0 create --dry-run -oyaml -f "+tmpDir.Path("deployment.yaml"), deploymentWebYAML). 
+ WithRun("kubectl --context kubecontext --namespace testNamespace -v=0 delete --grace-period=1 --ignore-not-found=true -f -"), }, } - tmpDir, cleanup := testutil.NewTempDir(t) - defer cleanup() - - tmpDir.Write("deployment.yaml", deploymentWebYAML) - for _, test := range tests { t.Run(test.description, func(t *testing.T) { - if test.command != nil { - defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) - util.DefaultExecCommand = test.command - } + defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) + util.DefaultExecCommand = test.command k := NewKubectlDeployer(tmpDir.Root(), test.cfg, testKubeContext, testNamespace, "") err := k.Cleanup(context.Background(), ioutil.Discard) @@ -214,12 +210,20 @@ func TestKubectlCleanup(t *testing.T) { } func TestKubectlRedeploy(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() + tmpDir.Write("deployment-web.yaml", deploymentWebYAML) + tmpDir.Write("deployment-app.yaml", deploymentAppYAML) + defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) util.DefaultExecCommand = testutil.NewFakeCmd(t). - WithRunOut("kubectl version --client -ojson", "1.12"). + WithRunOut("kubectl version --client -ojson", kubectlVersion). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment-app.yaml")+" -f "+tmpDir.Path("deployment-web.yaml"), deploymentAppYAML+"\n"+deploymentWebYAML). WithRunInput("kubectl --context kubecontext --namespace testNamespace apply --force -f -", `apiVersion: v1 kind: Pod metadata: + labels: + skaffold-deployer: kubectl name: leeroy-app spec: containers: @@ -229,48 +233,50 @@ spec: apiVersion: v1 kind: Pod metadata: + labels: + skaffold-deployer: kubectl name: leeroy-web spec: containers: - image: leeroy-web:v1 name: leeroy-web`). 
+ WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment-app.yaml")+" -f "+tmpDir.Path("deployment-web.yaml"), deploymentAppYAML+"\n"+deploymentWebYAML). WithRunInput("kubectl --context kubecontext --namespace testNamespace apply --force -f -", `apiVersion: v1 kind: Pod metadata: + labels: + skaffold-deployer: kubectl name: leeroy-app spec: containers: - image: leeroy-app:v2 - name: leeroy-app`) - - tmpDir, cleanup := testutil.NewTempDir(t) - defer cleanup() - tmpDir.Write("deployment-web.yaml", deploymentWebYAML) - tmpDir.Write("deployment-app.yaml", deploymentAppYaml) + name: leeroy-app`). + WithRunOut("kubectl --context kubecontext --namespace testNamespace create --dry-run -oyaml -f "+tmpDir.Path("deployment-app.yaml")+" -f "+tmpDir.Path("deployment-web.yaml"), deploymentAppYAML+"\n"+deploymentWebYAML) cfg := &latest.KubectlDeploy{ - Manifests: []string{"deployment-web.yaml", "deployment-app.yaml"}, + Manifests: []string{"*.yaml"}, } deployer := NewKubectlDeployer(tmpDir.Root(), cfg, testKubeContext, testNamespace, "") + labellers := []Labeller{deployer} // Deploy one manifest - _, err := deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ + err := deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ {ImageName: "leeroy-web", Tag: "leeroy-web:v1"}, {ImageName: "leeroy-app", Tag: "leeroy-app:v1"}, - }) + }, labellers) testutil.CheckError(t, false, err) // Deploy one manifest since only one image is updated - _, err = deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ + err = deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ {ImageName: "leeroy-web", Tag: "leeroy-web:v1"}, {ImageName: "leeroy-app", Tag: "leeroy-app:v2"}, - }) + }, labellers) testutil.CheckError(t, false, err) // Deploy zero manifest since no image is updated - _, err = deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ + err = 
deployer.Deploy(context.Background(), ioutil.Discard, []build.Artifact{ {ImageName: "leeroy-web", Tag: "leeroy-web:v1"}, {ImageName: "leeroy-app", Tag: "leeroy-app:v2"}, - }) + }, labellers) testutil.CheckError(t, false, err) } diff --git a/pkg/skaffold/deploy/kustomize.go b/pkg/skaffold/deploy/kustomize.go index ffa695ee2f0..dde74025bf2 100644 --- a/pkg/skaffold/deploy/kustomize.go +++ b/pkg/skaffold/deploy/kustomize.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,6 +42,7 @@ type kustomization struct { CRDs []string `yaml:"crds"` PatchesJSON6902 []patchJSON6902 `yaml:"patchesJson6902"` ConfigMapGenerator []configMapGenerator `yaml:"configMapGenerator"` + SecretGenerator []secretGenerator `yaml:"secretGenerator"` } type patchJSON6902 struct { @@ -52,6 +53,10 @@ type configMapGenerator struct { Files []string `yaml:"files"` } +type secretGenerator struct { + Files []string `yaml:"files"` +} + // KustomizeDeployer deploys workflows using kustomize CLI. type KustomizeDeployer struct { *latest.KustomizeDeploy @@ -80,32 +85,32 @@ func (k *KustomizeDeployer) Labels() map[string]string { } // Deploy runs `kubectl apply` on the manifest generated by kustomize. 
-func (k *KustomizeDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact) ([]Artifact, error) { - color.Default.Fprintln(out, "kubectl client version:", k.kubectl.Version()) - if err := k.kubectl.CheckVersion(); err != nil { +func (k *KustomizeDeployer) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact, labellers []Labeller) error { + color.Default.Fprintln(out, "kubectl client version:", k.kubectl.Version(ctx)) + if err := k.kubectl.CheckVersion(ctx); err != nil { color.Default.Fprintln(out, err) } manifests, err := k.readManifests(ctx) if err != nil { - return nil, errors.Wrap(err, "reading manifests") + return errors.Wrap(err, "reading manifests") } if len(manifests) == 0 { - return nil, nil + return nil } manifests, err = manifests.ReplaceImages(builds, k.defaultRepo) if err != nil { - return nil, errors.Wrap(err, "replacing images in manifests") + return errors.Wrap(err, "replacing images in manifests") } - updated, err := k.kubectl.Apply(ctx, out, manifests) + manifests, err = manifests.SetLabels(merge(labellers...)) if err != nil { - return nil, errors.Wrap(err, "apply") + return errors.Wrap(err, "setting labels in manifests") } - return parseManifestsForDeploys(k.kubectl.Namespace, updated) + return k.kubectl.Apply(ctx, out, manifests) } // Cleanup deletes what was deployed by calling Deploy. @@ -155,6 +160,9 @@ func dependenciesForKustomization(dir string) ([]string, error) { for _, generator := range content.ConfigMapGenerator { deps = append(deps, joinPaths(dir, generator.Files)...) } + for _, generator := range content.SecretGenerator { + deps = append(deps, joinPaths(dir, generator.Files)...) 
+ } return deps, nil } diff --git a/pkg/skaffold/deploy/kustomize_test.go b/pkg/skaffold/deploy/kustomize_test.go index ef5708f79c2..fc14c4742c6 100644 --- a/pkg/skaffold/deploy/kustomize_test.go +++ b/pkg/skaffold/deploy/kustomize_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -58,6 +58,13 @@ func TestDependenciesForKustomization(t *testing.T) { - files: [app2.properties, app3.properties]`, expected: []string{"kustomization.yaml", "app1.properties", "app2.properties", "app3.properties"}, }, + { + description: "secretGenerator", + yaml: `secretGenerator: +- files: [secret1.file] +- files: [secret2.file, secret3.file]`, + expected: []string{"kustomization.yaml", "secret1.file", "secret2.file", "secret3.file"}, + }, { description: "unknown base", yaml: `bases: [other]`, diff --git a/pkg/skaffold/deploy/labels.go b/pkg/skaffold/deploy/labels.go index a2b7b4fbc50..1ebce907943 100644 --- a/pkg/skaffold/deploy/labels.go +++ b/pkg/skaffold/deploy/labels.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,57 +17,37 @@ limitations under the License. 
package deploy import ( - "context" "encoding/json" "fmt" - "io" "strings" "time" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" kubectx "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" patch "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" ) +// Artifact contains all information about a completed deployment +type Artifact struct { + Obj runtime.Object + Namespace string +} + // Labeller can give key/value labels to set on deployed resources. type Labeller interface { Labels() map[string]string } -type withLabels struct { - Deployer - - labellers []Labeller -} - -// WithLabels creates a deployer that sets labels on deployed resources. -func WithLabels(d Deployer, labellers ...Labeller) Deployer { - return &withLabels{ - Deployer: d, - labellers: labellers, - } -} - -func (w *withLabels) Deploy(ctx context.Context, out io.Writer, artifacts []build.Artifact) ([]Artifact, error) { - dRes, err := w.Deployer.Deploy(ctx, out, artifacts) - - labelDeployResults(merge(w.labellers...), dRes) - - return dRes, err -} - // merge merges the labels from multiple sources. 
func merge(sources ...Labeller) map[string]string { merged := make(map[string]string) diff --git a/pkg/skaffold/deploy/testdata/foo/templates/deployment.yaml b/pkg/skaffold/deploy/testdata/foo/templates/deployment.yaml index 0900a4c4e3d..829363e3c9b 100644 --- a/pkg/skaffold/deploy/testdata/foo/templates/deployment.yaml +++ b/pkg/skaffold/deploy/testdata/foo/templates/deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1beta4 kind: Deployment metadata: name: {{ template "foo.fullname" . }} diff --git a/pkg/skaffold/deploy/util.go b/pkg/skaffold/deploy/util.go index 9015ce96a89..bca7da4d820 100644 --- a/pkg/skaffold/deploy/util.go +++ b/pkg/skaffold/deploy/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/docker/auth.go b/pkg/skaffold/docker/auth.go index bf9f511a058..f1306867336 100644 --- a/pkg/skaffold/docker/auth.go +++ b/pkg/skaffold/docker/auth.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/docker/auth_test.go b/pkg/skaffold/docker/auth_test.go index c1c95536bc0..23fd0a0cec2 100644 --- a/pkg/skaffold/docker/auth_test.go +++ b/pkg/skaffold/docker/auth_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/docker/client.go b/pkg/skaffold/docker/client.go index 8ed37f0b5a5..6d922071a98 100644 --- a/pkg/skaffold/docker/client.go +++ b/pkg/skaffold/docker/client.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ import ( "os" "os/exec" "path/filepath" + "sort" "strings" "sync" @@ -54,8 +55,8 @@ func NewAPIClient() (LocalDaemon, error) { return } - apiClient, err := newAPIClient(kubeContext) - dockerAPIClient = NewLocalDaemon(apiClient) + env, apiClient, err := newAPIClient(kubeContext) + dockerAPIClient = NewLocalDaemon(apiClient, env) dockerAPIClientErr = err }) @@ -63,7 +64,7 @@ func NewAPIClient() (LocalDaemon, error) { } // newAPIClient guesses the docker client to use based on current kubernetes context. -func newAPIClient(kubeContext string) (client.CommonAPIClient, error) { +func newAPIClient(kubeContext string) ([]string, client.CommonAPIClient, error) { if kubeContext == constants.DefaultMinikubeContext { return newMinikubeAPIClient() } @@ -73,19 +74,19 @@ func newAPIClient(kubeContext string) (client.CommonAPIClient, error) { // newEnvAPIClient returns a docker client based on the environment variables set. // It will "negotiate" the highest possible API version supported by both the client // and the server if there is a mismatch. 
-func newEnvAPIClient() (client.CommonAPIClient, error) { +func newEnvAPIClient() ([]string, client.CommonAPIClient, error) { cli, err := client.NewClientWithOpts(client.FromEnv, client.WithHTTPHeaders(getUserAgentHeader())) if err != nil { - return nil, fmt.Errorf("error getting docker client: %s", err) + return nil, nil, fmt.Errorf("error getting docker client: %s", err) } cli.NegotiateAPIVersion(context.Background()) - return cli, nil + return nil, cli, nil } // newMinikubeAPIClient returns a docker client using the environment variables // provided by minikube. -func newMinikubeAPIClient() (client.CommonAPIClient, error) { +func newMinikubeAPIClient() ([]string, client.CommonAPIClient, error) { env, err := getMinikubeDockerEnv() if err != nil { logrus.Warnf("Could not get minikube docker env, falling back to local docker daemon: %s", err) @@ -102,7 +103,7 @@ func newMinikubeAPIClient() (client.CommonAPIClient, error) { } tlsc, err := tlsconfig.Client(options) if err != nil { - return nil, err + return nil, nil, err } httpclient = &http.Client{ @@ -122,11 +123,20 @@ func newMinikubeAPIClient() (client.CommonAPIClient, error) { version = api.DefaultVersion } - return client.NewClientWithOpts( + api, err := client.NewClientWithOpts( client.WithHost(host), client.WithVersion(version), client.WithHTTPClient(httpclient), client.WithHTTPHeaders(getUserAgentHeader())) + + // Keep the minikube environment variables + var environment []string + for k, v := range env { + environment = append(environment, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(environment) + + return environment, api, err } func getUserAgentHeader() map[string]string { @@ -170,6 +180,7 @@ func getMinikubeDockerEnv() (map[string]string, error) { if err != nil { return nil, errors.Wrap(err, "getting minikube filename") } + cmd := exec.Command(miniKubeFilename, "docker-env", "--shell", "none") out, err := util.RunCmdOut(cmd) if err != nil { diff --git a/pkg/skaffold/docker/client_test.go 
b/pkg/skaffold/docker/client_test.go index 6f46fe8d2bd..5f8fb59318f 100644 --- a/pkg/skaffold/docker/client_test.go +++ b/pkg/skaffold/docker/client_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,8 +49,10 @@ func TestNewEnvClient(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { unsetEnvs := testutil.SetEnvs(t, test.envs) - _, err := newEnvAPIClient() - testutil.CheckError(t, test.shouldErr, err) + + env, _, err := newEnvAPIClient() + + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, []string(nil), env) unsetEnvs(t) }) } @@ -60,62 +62,69 @@ func TestNewEnvClient(t *testing.T) { func TestNewMinikubeImageAPIClient(t *testing.T) { var tests = []struct { description string - cmd util.Command - - expected client.CommonAPIClient - shouldErr bool + env string + expected client.CommonAPIClient + expectedEnv []string + shouldErr bool }{ { description: "correct client", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + env: `DOCKER_TLS_VERIFY=1 DOCKER_HOST=http://127.0.0.1:8080 DOCKER_CERT_PATH=testdata -DOCKER_API_VERSION=1.23`), +DOCKER_API_VERSION=1.23`, + expectedEnv: []string{"DOCKER_API_VERSION=1.23", "DOCKER_CERT_PATH=testdata", "DOCKER_HOST=http://127.0.0.1:8080", "DOCKER_TLS_VERIFY=1"}, }, { - description: "correct client", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + description: "bad certificate", + env: `DOCKER_TLS_VERIFY=1 DOCKER_HOST=http://127.0.0.1:8080 DOCKER_CERT_PATH=bad/cert/path -DOCKER_API_VERSION=1.23`), +DOCKER_API_VERSION=1.23`, shouldErr: true, }, { description: "missing host env, no error", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + env: 
`DOCKER_TLS_VERIFY=1 DOCKER_CERT_PATH=testdata -DOCKER_API_VERSION=1.23`), +DOCKER_API_VERSION=1.23`, + expectedEnv: []string{"DOCKER_API_VERSION=1.23", "DOCKER_CERT_PATH=testdata", "DOCKER_TLS_VERIFY=1"}, }, { description: "missing version env, no error", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + env: `DOCKER_TLS_VERIFY=1 DOCKER_HOST=http://127.0.0.1:8080 -DOCKER_CERT_PATH=testdata`), +DOCKER_CERT_PATH=testdata`, + expectedEnv: []string{"DOCKER_CERT_PATH=testdata", "DOCKER_HOST=http://127.0.0.1:8080", "DOCKER_TLS_VERIFY=1"}, }, { - description: "missing version env, no error", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + description: "bad url", + env: `DOCKER_TLS_VERIFY=1 DOCKER_HOST=badurl DOCKER_CERT_PATH=testdata -DOCKER_API_VERSION=1.23`), +DOCKER_API_VERSION=1.23`, shouldErr: true, }, { description: "bad env output, should fallback to host docker", - cmd: testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", `DOCKER_TLS_VERIFY=1 + env: `DOCKER_TLS_VERIFY=1 DOCKER_HOST=http://127.0.0.1:8080=toomanyvalues DOCKER_CERT_PATH=testdata -DOCKER_API_VERSION=1.23`), +DOCKER_API_VERSION=1.23`, }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { defer func(c util.Command) { util.DefaultExecCommand = c }(util.DefaultExecCommand) - util.DefaultExecCommand = test.cmd + util.DefaultExecCommand = testutil.NewFakeCmd(t).WithRunOut("minikube docker-env --shell none", test.env) + + env, _, err := newMinikubeAPIClient() - _, err := newMinikubeAPIClient() testutil.CheckError(t, test.shouldErr, err) + if !test.shouldErr { + testutil.CheckDeepEqual(t, test.expectedEnv, env) + } }) } } diff --git a/pkg/skaffold/docker/context.go b/pkg/skaffold/docker/context.go index dcf54fefdb5..48a286a8302 100644 --- a/pkg/skaffold/docker/context.go +++ b/pkg/skaffold/docker/context.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors 
+Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,62 +20,26 @@ import ( "context" "io" "path/filepath" - "strings" - cstorage "cloud.google.com/go/storage" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/pkg/errors" ) -// NormalizeDockerfilePath returns the absolute path to the dockerfile. -func NormalizeDockerfilePath(context, dockerfile string) (string, error) { - if filepath.IsAbs(dockerfile) { - return dockerfile, nil - } - - if !strings.HasPrefix(dockerfile, context) { - dockerfile = filepath.Join(context, dockerfile) - } - return filepath.Abs(dockerfile) -} - func CreateDockerTarContext(ctx context.Context, w io.Writer, workspace string, a *latest.DockerArtifact) error { paths, err := GetDependencies(ctx, workspace, a) if err != nil { return errors.Wrap(err, "getting relative tar paths") } - if err := util.CreateTar(w, workspace, paths); err != nil { - return errors.Wrap(err, "creating tar gz") - } - - return nil -} - -func CreateDockerTarGzContext(ctx context.Context, w io.Writer, workspace string, a *latest.DockerArtifact) error { - paths, err := GetDependencies(ctx, workspace, a) - if err != nil { - return errors.Wrap(err, "getting relative tar paths") + var p []string + for _, path := range paths { + p = append(p, filepath.Join(workspace, path)) } - if err := util.CreateTarGz(w, workspace, paths); err != nil { + if err := util.CreateTar(w, workspace, p); err != nil { return errors.Wrap(err, "creating tar gz") } return nil } - -func UploadContextToGCS(ctx context.Context, workspace string, a *latest.DockerArtifact, bucket, objectName string) error { - c, err := cstorage.NewClient(ctx) - if err != nil { - return errors.Wrap(err, "creating GCS client") - } - defer c.Close() - - w := c.Bucket(bucket).Object(objectName).NewWriter(ctx) - if err := 
CreateDockerTarGzContext(ctx, w, workspace, a); err != nil { - return errors.Wrap(err, "uploading targz to google storage") - } - return w.Close() -} diff --git a/pkg/skaffold/docker/context_test.go b/pkg/skaffold/docker/context_test.go index 8d870981092..6479a05a2ea 100644 --- a/pkg/skaffold/docker/context_test.go +++ b/pkg/skaffold/docker/context_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,62 +27,68 @@ import ( ) func TestDockerContext(t *testing.T) { - tmpDir, cleanup := testutil.NewTempDir(t) - defer cleanup() + for _, dir := range []string{".", "sub"} { + t.Run(dir, func(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() - imageFetcher := fakeImageFetcher{} - RetrieveImage = imageFetcher.fetch - defer func() { RetrieveImage = retrieveImage }() + imageFetcher := fakeImageFetcher{} + RetrieveImage = imageFetcher.fetch + defer func() { RetrieveImage = retrieveImage }() - artifact := &latest.DockerArtifact{ - DockerfilePath: "Dockerfile", - BuildArgs: map[string]*string{}, - } + artifact := &latest.DockerArtifact{ + DockerfilePath: "Dockerfile", + } - tmpDir.Write("files/ignored.txt", "") - tmpDir.Write("files/included.txt", "") - tmpDir.Write(".dockerignore", "**/ignored.txt\nalsoignored.txt") - tmpDir.Write("Dockerfile", "FROM alpine\nCOPY ./files /files") - tmpDir.Write("ignored.txt", "") - tmpDir.Write("alsoignored.txt", "") + tmpDir.Write(dir+"/files/ignored.txt", "") + tmpDir.Write(dir+"/files/included.txt", "") + tmpDir.Write(dir+"/.dockerignore", "**/ignored.txt\nalsoignored.txt") + tmpDir.Write(dir+"/Dockerfile", "FROM alpine\nCOPY ./files /files") + tmpDir.Write(dir+"/ignored.txt", "") + tmpDir.Write(dir+"/alsoignored.txt", "") - reader, writer := io.Pipe() - go func() { - err := CreateDockerTarContext(context.Background(), writer, 
tmpDir.Root(), artifact) - if err != nil { - writer.CloseWithError(err) - } else { - writer.Close() - } - }() + reset := testutil.Chdir(t, tmpDir.Root()) + defer reset() - files := make(map[string]bool) - tr := tar.NewReader(reader) - for { - header, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } + reader, writer := io.Pipe() + go func() { + err := CreateDockerTarContext(context.Background(), writer, dir, artifact) + if err != nil { + writer.CloseWithError(err) + } else { + writer.Close() + } + }() - files[header.Name] = true - } + files := make(map[string]bool) + tr := tar.NewReader(reader) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } - if files["ignored.txt"] { - t.Error("File ignored.txt should have been excluded, but was not") - } - if files["alsoignored.txt"] { - t.Error("File alsoignored.txt should have been excluded, but was not") - } - if files["files/ignored.txt"] { - t.Error("File files/ignored.txt should have been excluded, but was not") - } - if !files["files/included.txt"] { - t.Error("File files/included.txt should have been included, but was not") - } - if !files["Dockerfile"] { - t.Error("File Dockerfile should have been included, but was not") + files[header.Name] = true + } + + if files["ignored.txt"] { + t.Error("File ignored.txt should have been excluded, but was not") + } + if files["alsoignored.txt"] { + t.Error("File alsoignored.txt should have been excluded, but was not") + } + if files["files/ignored.txt"] { + t.Error("File files/ignored.txt should have been excluded, but was not") + } + if !files["files/included.txt"] { + t.Error("File files/included.txt should have been included, but was not") + } + if !files["Dockerfile"] { + t.Error("File Dockerfile should have been included, but was not") + } + }) } } diff --git a/pkg/skaffold/docker/image.go b/pkg/skaffold/docker/image.go index 3128aa42bb3..e536bc5a689 100644 --- 
a/pkg/skaffold/docker/image.go +++ b/pkg/skaffold/docker/image.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -39,10 +39,12 @@ import ( // LocalDaemon talks to a local Docker API. type LocalDaemon interface { Close() error + ExtraEnv() []string ServerVersion(ctx context.Context) (types.Version, error) ConfigFile(ctx context.Context, image string) (*v1.ConfigFile, error) Build(ctx context.Context, out io.Writer, workspace string, a *latest.DockerArtifact, ref string) (string, error) Push(ctx context.Context, out io.Writer, ref string) (string, error) + Pull(ctx context.Context, out io.Writer, ref string) error Load(ctx context.Context, out io.Writer, input io.Reader, ref string) (string, error) Tag(ctx context.Context, image, ref string) error ImageID(ctx context.Context, ref string) (string, error) @@ -50,16 +52,24 @@ type LocalDaemon interface { type localDaemon struct { apiClient client.CommonAPIClient + extraEnv []string imageCache sync.Map } // NewLocalDaemon creates a new LocalDaemon. -func NewLocalDaemon(apiClient client.CommonAPIClient) LocalDaemon { +func NewLocalDaemon(apiClient client.CommonAPIClient, extraEnv []string) LocalDaemon { return &localDaemon{ apiClient: apiClient, + extraEnv: extraEnv, } } +// ExtraEnv returns the env variables needed to point at this local Docker +// eg. minikube. This has be set in addition to the current environment. +func (l *localDaemon) ExtraEnv() []string { + return l.extraEnv +} + // PushResult gives the information on an image that has been pushed. type PushResult struct { Digest string @@ -222,6 +232,24 @@ func (l *localDaemon) Push(ctx context.Context, out io.Writer, ref string) (stri return digest, nil } +// Pull pulls an image reference from a registry. 
+func (l *localDaemon) Pull(ctx context.Context, out io.Writer, ref string) error { + registryAuth, err := l.encodedRegistryAuth(ctx, DefaultAuthHelper, ref) + if err != nil { + return errors.Wrapf(err, "getting auth config for %s", ref) + } + + rc, err := l.apiClient.ImagePull(ctx, ref, types.ImagePullOptions{ + RegistryAuth: registryAuth, + }) + if err != nil { + return errors.Wrap(err, "pulling image from repository") + } + defer rc.Close() + + return streamDockerMessages(out, rc, nil) +} + // Load loads an image from a tar file. Returns the imageID for the loaded image. func (l *localDaemon) Load(ctx context.Context, out io.Writer, input io.Reader, ref string) (string, error) { resp, err := l.apiClient.ImageLoad(ctx, input, false) diff --git a/pkg/skaffold/docker/image_test.go b/pkg/skaffold/docker/image_test.go index 3f872bd2fda..92d498facff 100644 --- a/pkg/skaffold/docker/image_test.go +++ b/pkg/skaffold/docker/image_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -49,10 +49,10 @@ func TestPush(t *testing.T) { imageName: "gcr.io/scratchman", api: testutil.FakeAPIClient{ TagToImageID: map[string]string{ - "gcr.io/scratchman": "sha256:abcab", + "gcr.io/scratchman": "sha256:imageIDabcab", }, }, - expectedDigest: "sha256:abcab", + expectedDigest: "sha256:7368613235363a696d61676549446162636162e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, { description: "stream error", diff --git a/pkg/skaffold/docker/parse.go b/pkg/skaffold/docker/parse.go index 221940adf94..22c560797b3 100644 --- a/pkg/skaffold/docker/parse.go +++ b/pkg/skaffold/docker/parse.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -145,7 +145,7 @@ func onbuildInstructions(nodes []*parser.Node) ([]*parser.Node, error) { // Image names are case SENSITIVE img, err := RetrieveImage(from.image) if err != nil { - logrus.Warnf("Error processing base image for ONBUILD triggers: %s. Dependencies may be incomplete.", err) + logrus.Warnf("Error processing base image (%s) for ONBUILD triggers: %s. 
Dependencies may be incomplete.", from.image, err) continue } @@ -179,7 +179,10 @@ func copiedFiles(nodes []*parser.Node) ([][]string, error) { copied = append(copied, files) } case command.Env: - envs[node.Next.Value] = node.Next.Next.Value + // one env command may define multiple variables + for node := node.Next; node != nil && node.Next != nil; node = node.Next.Next { + envs[node.Value] = node.Next.Value + } } } @@ -198,14 +201,16 @@ func readDockerfile(workspace, absDockerfilePath string, buildArgs map[string]*s return nil, errors.Wrap(err, "parsing dockerfile") } - expandBuildArgs(res.AST.Children, buildArgs) + dockerfileLines := res.AST.Children + + expandBuildArgs(dockerfileLines, buildArgs) - instructions, err := onbuildInstructions(res.AST.Children) + instructions, err := onbuildInstructions(dockerfileLines) if err != nil { return nil, errors.Wrap(err, "listing ONBUILD instructions") } - copied, err := copiedFiles(append(instructions, res.AST.Children...)) + copied, err := copiedFiles(append(instructions, dockerfileLines...)) if err != nil { return nil, errors.Wrap(err, "listing copied files") } @@ -259,6 +264,18 @@ func expandPaths(workspace string, copied [][]string) ([]string, error) { return deps, nil } +// NormalizeDockerfilePath returns the absolute path to the dockerfile. +func NormalizeDockerfilePath(context, dockerfile string) (string, error) { + if filepath.IsAbs(dockerfile) { + return dockerfile, nil + } + + if !strings.HasPrefix(dockerfile, context) { + dockerfile = filepath.Join(context, dockerfile) + } + return filepath.Abs(dockerfile) +} + // GetDependencies finds the sources dependencies for the given docker artifact. // All paths are relative to the workspace. 
func GetDependencies(ctx context.Context, workspace string, a *latest.DockerArtifact) ([]string, error) { diff --git a/pkg/skaffold/docker/parse_test.go b/pkg/skaffold/docker/parse_test.go index 0ec51f4d49e..3f09944fd7e 100644 --- a/pkg/skaffold/docker/parse_test.go +++ b/pkg/skaffold/docker/parse_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -80,6 +80,13 @@ WORKDIR ${foo} # WORKDIR /bar COPY $foo /quux # COPY bar /quux ` +const multiEnvTest = ` +FROM busybox +ENV baz=bar \ + foo=docker +COPY $foo/nginx.conf . # COPY docker/nginx.conf . +` + const copyDirectory = ` FROM nginx ADD . /etc/ @@ -293,6 +300,13 @@ func TestGetDependencies(t *testing.T) { expected: []string{"Dockerfile", "bar"}, fetched: []string{"busybox"}, }, + { + description: "multiple env test", + dockerfile: multiEnvTest, + workspace: ".", + expected: []string{"Dockerfile", filepath.Join("docker", "nginx.conf")}, + fetched: []string{"busybox"}, + }, { description: "multi file copy", dockerfile: multiFileCopy, @@ -498,8 +512,8 @@ func TestGetDependencies(t *testing.T) { workspace := tmpDir.Path(test.workspace) deps, err := GetDependencies(context.Background(), workspace, &latest.DockerArtifact{ - BuildArgs: test.buildArgs, DockerfilePath: "Dockerfile", + BuildArgs: test.buildArgs, }) testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, deps) diff --git a/pkg/skaffold/docker/reference.go b/pkg/skaffold/docker/reference.go index 7405826cb5b..6100309ca87 100644 --- a/pkg/skaffold/docker/reference.go +++ b/pkg/skaffold/docker/reference.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/docker/reference_test.go b/pkg/skaffold/docker/reference_test.go index bca42fdf316..65db59f955e 100644 --- a/pkg/skaffold/docker/reference_test.go +++ b/pkg/skaffold/docker/reference_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/docker/remote.go b/pkg/skaffold/docker/remote.go index ef029deff11..ac5e9b08e26 100644 --- a/pkg/skaffold/docker/remote.go +++ b/pkg/skaffold/docker/remote.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,19 +23,15 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func AddTag(src, target string) error { - srcRef, err := name.ParseReference(src, name.WeakValidation) + logrus.Debugf("attempting to add tag %s to src %s", target, src) + img, err := remoteImage(src) if err != nil { - return errors.Wrap(err, "getting source reference") - } - - auth, err := authn.DefaultKeychain.Resolve(srcRef.Context().Registry) - if err != nil { - return err + return errors.Wrap(err, "getting image") } targetRef, err := name.ParseReference(target, name.WeakValidation) @@ -43,21 +39,12 @@ func AddTag(src, target string) error { return errors.Wrap(err, "getting target reference") } - return addTag(srcRef, targetRef, auth, http.DefaultTransport) -} - -func addTag(ref name.Reference, targetRef name.Reference, auth authn.Authenticator, t http.RoundTripper) error { - tr, err := transport.New(ref.Context().Registry, 
auth, t, []string{targetRef.Scope(transport.PushScope)}) - if err != nil { - return err - } - - img, err := remote.Image(ref, remote.WithAuth(auth), remote.WithTransport(tr)) + auth, err := authn.DefaultKeychain.Resolve(targetRef.Context().Registry) if err != nil { return err } - return remote.Write(targetRef, img, auth, t) + return remote.Write(targetRef, img, auth, http.DefaultTransport) } func RemoteDigest(identifier string) (string, error) { @@ -94,5 +81,5 @@ func remoteImage(identifier string) (v1.Image, error) { return nil, errors.Wrap(err, "getting default keychain auth") } - return remote.Image(ref, remote.WithAuth(auth), remote.WithTransport(http.DefaultTransport)) + return remote.Image(ref, remote.WithAuth(auth)) } diff --git a/pkg/skaffold/docker/remote_test.go b/pkg/skaffold/docker/remote_test.go new file mode 100644 index 00000000000..c48d50ebe35 --- /dev/null +++ b/pkg/skaffold/docker/remote_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docker + +import ( + "strings" + "testing" +) + +func TestRemoteDigest(t *testing.T) { + validReferences := []string{ + "python", + "python:3-slim", + } + + for _, ref := range validReferences { + _, err := RemoteDigest(ref) + + // Ignore networking errors + if err != nil && strings.Contains(err.Error(), "could not parse") { + t.Errorf("unable to parse %q: %v", ref, err) + } + } +} diff --git a/pkg/skaffold/gcp/auth.go b/pkg/skaffold/gcp/auth.go index bcc2186c81b..694e439abbb 100644 --- a/pkg/skaffold/gcp/auth.go +++ b/pkg/skaffold/gcp/auth.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/gcp/auth_test.go b/pkg/skaffold/gcp/auth_test.go index d9820717dd2..937465cf287 100644 --- a/pkg/skaffold/gcp/auth_test.go +++ b/pkg/skaffold/gcp/auth_test.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/gcp/projectid.go b/pkg/skaffold/gcp/projectid.go index 4088da44cae..ec4ea9f784d 100644 --- a/pkg/skaffold/gcp/projectid.go +++ b/pkg/skaffold/gcp/projectid.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/gcp/projectid_test.go b/pkg/skaffold/gcp/projectid_test.go index 19ba8dd2b24..68842a13a18 100644 --- a/pkg/skaffold/gcp/projectid_test.go +++ b/pkg/skaffold/gcp/projectid_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/jib/jib.go b/pkg/skaffold/jib/jib.go index 7c8746117ff..ade27ad98c6 100644 --- a/pkg/skaffold/jib/jib.go +++ b/pkg/skaffold/jib/jib.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/jib/jib_gradle.go b/pkg/skaffold/jib/jib_gradle.go index 4d661484f3d..1885c073d56 100644 --- a/pkg/skaffold/jib/jib_gradle.go +++ b/pkg/skaffold/jib/jib_gradle.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -47,8 +47,13 @@ func getCommandGradle(ctx context.Context, workspace string, a *latest.JibGradle } // GenerateGradleArgs generates the arguments to Gradle for building the project as an image. -func GenerateGradleArgs(task string, imageName string, a *latest.JibGradleArtifact) []string { - return []string{gradleCommand(a, task), "--image=" + imageName} +func GenerateGradleArgs(task string, imageName string, a *latest.JibGradleArtifact, skipTests bool) []string { + args := []string{gradleCommand(a, task), "--image=" + imageName} + if skipTests { + args = append(args, "-x", "test") + } + args = append(args, a.Flags...) 
+ return args } func gradleCommand(a *latest.JibGradleArtifact, task string) string { diff --git a/pkg/skaffold/jib/jib_gradle_test.go b/pkg/skaffold/jib/jib_gradle_test.go index 9adc4e99d15..6a62e718453 100644 --- a/pkg/skaffold/jib/jib_gradle_test.go +++ b/pkg/skaffold/jib/jib_gradle_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -146,15 +146,19 @@ func TestGetCommandGradle(t *testing.T) { func TestGenerateGradleArgs(t *testing.T) { var testCases = []struct { - in latest.JibGradleArtifact - out []string + in latest.JibGradleArtifact + skipTests bool + out []string }{ - {latest.JibGradleArtifact{}, []string{":task", "--image=image"}}, - {latest.JibGradleArtifact{Project: "project"}, []string{":project:task", "--image=image"}}, + {latest.JibGradleArtifact{}, false, []string{":task", "--image=image"}}, + {latest.JibGradleArtifact{Flags: []string{"-extra", "args"}}, false, []string{":task", "--image=image", "-extra", "args"}}, + {latest.JibGradleArtifact{}, true, []string{":task", "--image=image", "-x", "test"}}, + {latest.JibGradleArtifact{Project: "project"}, false, []string{":project:task", "--image=image"}}, + {latest.JibGradleArtifact{Project: "project"}, true, []string{":project:task", "--image=image", "-x", "test"}}, } for _, tt := range testCases { - command := GenerateGradleArgs("task", "image", &tt.in) + command := GenerateGradleArgs("task", "image", &tt.in, tt.skipTests) testutil.CheckDeepEqual(t, tt.out, command) } diff --git a/pkg/skaffold/jib/jib_maven.go b/pkg/skaffold/jib/jib_maven.go index ef6aa6013ef..9f8dc4b4900 100644 --- a/pkg/skaffold/jib/jib_maven.go +++ b/pkg/skaffold/jib/jib_maven.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this 
file except in compliance with the License. @@ -47,9 +47,13 @@ func getCommandMaven(ctx context.Context, workspace string, a *latest.JibMavenAr } // GenerateMavenArgs generates the arguments to Maven for building the project as an image. -func GenerateMavenArgs(goal string, imageName string, a *latest.JibMavenArtifact) []string { +func GenerateMavenArgs(goal string, imageName string, a *latest.JibMavenArtifact, skipTests bool) []string { args := mavenArgs(a) + if skipTests { + args = append(args, "-DskipTests=true") + } + if a.Module == "" { // single-module project args = append(args, "prepare-package", "jib:"+goal) @@ -66,6 +70,8 @@ func GenerateMavenArgs(goal string, imageName string, a *latest.JibMavenArtifact func mavenArgs(a *latest.JibMavenArtifact) []string { var args []string + args = append(args, a.Flags...) + if a.Profile != "" { args = append(args, "--activate-profiles", a.Profile) } diff --git a/pkg/skaffold/jib/jib_maven_test.go b/pkg/skaffold/jib/jib_maven_test.go index cdb1a966369..df990fd2925 100644 --- a/pkg/skaffold/jib/jib_maven_test.go +++ b/pkg/skaffold/jib/jib_maven_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -99,6 +99,16 @@ func TestGetCommandMaven(t *testing.T) { return MavenCommand.CreateCommand(ctx, workspace, []string{"--non-recursive", "jib:_skaffold-files", "--quiet"}) }, }, + { + description: "maven with extra flags", + jibMavenArtifact: latest.JibMavenArtifact{ + Flags: []string{"-DskipTests", "-x"}, + }, + filesInWorkspace: []string{}, + expectedCmd: func(workspace string) *exec.Cmd { + return MavenCommand.CreateCommand(ctx, workspace, []string{"-DskipTests", "-x", "--non-recursive", "jib:_skaffold-files", "--quiet"}) + }, + }, { description: "maven with profile", jibMavenArtifact: latest.JibMavenArtifact{Profile: "profile"}, @@ -161,17 +171,22 @@ func TestGetCommandMaven(t *testing.T) { func TestGenerateMavenArgs(t *testing.T) { var testCases = []struct { - in latest.JibMavenArtifact - out []string + in latest.JibMavenArtifact + skipTests bool + out []string }{ - {latest.JibMavenArtifact{}, []string{"--non-recursive", "prepare-package", "jib:goal", "-Dimage=image"}}, - {latest.JibMavenArtifact{Profile: "profile"}, []string{"--activate-profiles", "profile", "--non-recursive", "prepare-package", "jib:goal", "-Dimage=image"}}, - {latest.JibMavenArtifact{Module: "module"}, []string{"--projects", "module", "--also-make", "package", "-Dimage=image"}}, - {latest.JibMavenArtifact{Module: "module", Profile: "profile"}, []string{"--activate-profiles", "profile", "--projects", "module", "--also-make", "package", "-Dimage=image"}}, + {latest.JibMavenArtifact{}, false, []string{"--non-recursive", "prepare-package", "jib:goal", "-Dimage=image"}}, + {latest.JibMavenArtifact{}, true, []string{"--non-recursive", "-DskipTests=true", "prepare-package", "jib:goal", "-Dimage=image"}}, + {latest.JibMavenArtifact{Profile: "profile"}, false, []string{"--activate-profiles", "profile", "--non-recursive", "prepare-package", "jib:goal", "-Dimage=image"}}, + {latest.JibMavenArtifact{Profile: "profile"}, true, []string{"--activate-profiles", "profile", "--non-recursive", 
"-DskipTests=true", "prepare-package", "jib:goal", "-Dimage=image"}}, + {latest.JibMavenArtifact{Module: "module"}, false, []string{"--projects", "module", "--also-make", "package", "-Dimage=image"}}, + {latest.JibMavenArtifact{Module: "module"}, true, []string{"--projects", "module", "--also-make", "-DskipTests=true", "package", "-Dimage=image"}}, + {latest.JibMavenArtifact{Module: "module", Profile: "profile"}, false, []string{"--activate-profiles", "profile", "--projects", "module", "--also-make", "package", "-Dimage=image"}}, + {latest.JibMavenArtifact{Module: "module", Profile: "profile"}, true, []string{"--activate-profiles", "profile", "--projects", "module", "--also-make", "-DskipTests=true", "package", "-Dimage=image"}}, } for _, tt := range testCases { - args := GenerateMavenArgs("goal", "image", &tt.in) + args := GenerateMavenArgs("goal", "image", &tt.in, tt.skipTests) testutil.CheckDeepEqual(t, tt.out, args) } diff --git a/pkg/skaffold/jib/jib_test.go b/pkg/skaffold/jib/jib_test.go index 0f433be873b..bda2a6327c2 100644 --- a/pkg/skaffold/jib/jib_test.go +++ b/pkg/skaffold/jib/jib_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/client.go b/pkg/skaffold/kubernetes/client.go index 9b3727d9a84..0f5f8c9dcac 100644 --- a/pkg/skaffold/kubernetes/client.go +++ b/pkg/skaffold/kubernetes/client.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/kubernetes/colorpicker.go b/pkg/skaffold/kubernetes/colorpicker.go index 3f55d8c0e29..a71e621f065 100644 --- a/pkg/skaffold/kubernetes/colorpicker.go +++ b/pkg/skaffold/kubernetes/colorpicker.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/colorpicker_test.go b/pkg/skaffold/kubernetes/colorpicker_test.go index 9c18acfe3b8..869ea116085 100644 --- a/pkg/skaffold/kubernetes/colorpicker_test.go +++ b/pkg/skaffold/kubernetes/colorpicker_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/context/context.go b/pkg/skaffold/kubernetes/context/context.go index f585dc349a0..85f4ff96031 100644 --- a/pkg/skaffold/kubernetes/context/context.go +++ b/pkg/skaffold/kubernetes/context/context.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/context/context_test.go b/pkg/skaffold/kubernetes/context/context_test.go index 7e920482e79..d0f46a972f5 100644 --- a/pkg/skaffold/kubernetes/context/context_test.go +++ b/pkg/skaffold/kubernetes/context/context_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/kubernetes/log.go b/pkg/skaffold/kubernetes/log.go index 2d95a41d351..86dd1486bf9 100644 --- a/pkg/skaffold/kubernetes/log.go +++ b/pkg/skaffold/kubernetes/log.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -43,6 +43,7 @@ var DynamicClient = GetDynamicClient type LogAggregator struct { output io.Writer podSelector PodSelector + namespaces []string colorPicker ColorPicker muted int32 @@ -52,10 +53,11 @@ type LogAggregator struct { } // NewLogAggregator creates a new LogAggregator for a given output. -func NewLogAggregator(out io.Writer, baseImageNames []string, podSelector PodSelector) *LogAggregator { +func NewLogAggregator(out io.Writer, baseImageNames []string, podSelector PodSelector, namespaces []string) *LogAggregator { return &LogAggregator{ output: out, podSelector: podSelector, + namespaces: namespaces, colorPicker: NewColorPicker(baseImageNames), trackedContainers: trackedContainers{ ids: map[string]bool{}, @@ -70,19 +72,21 @@ func (a *LogAggregator) Start(ctx context.Context) error { a.cancel = cancel a.startTime = time.Now() - watcher, err := PodWatcher() + aggregate := make(chan watch.Event) + stopWatchers, err := AggregatePodWatcher(a.namespaces, aggregate) if err != nil { - return errors.Wrap(err, "initializing pod watcher") + stopWatchers() + return errors.Wrap(err, "initializing aggregate pod watcher") } go func() { - defer watcher.Stop() + defer stopWatchers() for { select { case <-cancelCtx.Done(): return - case evt, ok := <-watcher.ResultChan(): + case evt, ok := <-aggregate: if !ok { return } @@ -192,7 +196,7 @@ func (a *LogAggregator) streamRequest(ctx context.Context, headerColor color.Col if _, err := headerColor.Fprintf(a.output, "%s ", header); err != nil { return errors.Wrap(err, "writing pod prefix header to out") } - if _, err := 
fmt.Fprint(a.output, string(line)); err != nil { + if _, err := color.White.Fprint(a.output, string(line)); err != nil { return errors.Wrap(err, "writing pod log to out") } } diff --git a/pkg/skaffold/kubernetes/log_test.go b/pkg/skaffold/kubernetes/log_test.go index 273de1669a0..98d95552683 100644 --- a/pkg/skaffold/kubernetes/log_test.go +++ b/pkg/skaffold/kubernetes/log_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/port_forward.go b/pkg/skaffold/kubernetes/port_forward.go index bc7bdbcadc6..25095838f61 100644 --- a/pkg/skaffold/kubernetes/port_forward.go +++ b/pkg/skaffold/kubernetes/port_forward.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ import ( "context" "fmt" "io" + "net" "os/exec" "strconv" @@ -38,11 +39,12 @@ type PortForwarder struct { output io.Writer podSelector PodSelector + namespaces []string // forwardedPods is a map of portForwardEntry.key() (string) -> portForwardEntry forwardedPods map[string]*portForwardEntry - // forwardedPorts is a map of port (int32) -> container name (string) + // forwardedPorts is a map of local port (int32) -> portForwardEntry key (string) forwardedPorts map[int32]string } @@ -52,6 +54,7 @@ type portForwardEntry struct { namespace string containerName string port int32 + localPort int32 cancel context.CancelFunc } @@ -64,6 +67,12 @@ type Forwarder interface { type kubectlForwarder struct{} +var ( + // For testing + retrieveAvailablePort = getAvailablePort + isPortAvailable = portAvailable +) + // Forward port-forwards a pod using kubectl port-forward // It returns an error only if the process fails or was terminated by a signal other than SIGTERM func (*kubectlForwarder) Forward(parentCtx context.Context, pfe *portForwardEntry) error { @@ -72,9 +81,7 @@ func (*kubectlForwarder) Forward(parentCtx context.Context, pfe *portForwardEntr ctx, cancel := context.WithCancel(parentCtx) pfe.cancel = cancel - portNumber := fmt.Sprintf("%d", pfe.port) - - cmd := exec.CommandContext(ctx, "kubectl", "port-forward", pfe.podName, portNumber, portNumber, "--namespace", pfe.namespace) + cmd := exec.CommandContext(ctx, "kubectl", "port-forward", pfe.podName, fmt.Sprintf("%d:%d", pfe.localPort, pfe.port), "--namespace", pfe.namespace) buf := &bytes.Buffer{} cmd.Stdout = buf cmd.Stderr = buf @@ -83,7 +90,7 @@ func (*kubectlForwarder) Forward(parentCtx context.Context, pfe *portForwardEntr if errors.Cause(err) == context.Canceled { return nil } - return errors.Wrapf(err, "port forwarding pod: %s/%s, port: %s, err: %s", pfe.namespace, pfe.podName, portNumber, buf.String()) + return errors.Wrapf(err, "port forwarding pod: %s/%s, port: %d to local port: %d, err: %s", 
pfe.namespace, pfe.podName, pfe.port, pfe.localPort, buf.String()) } go cmd.Wait() @@ -101,11 +108,12 @@ func (*kubectlForwarder) Terminate(p *portForwardEntry) { } // NewPortForwarder returns a struct that tracks and port-forwards pods as they are created and modified -func NewPortForwarder(out io.Writer, podSelector PodSelector) *PortForwarder { +func NewPortForwarder(out io.Writer, podSelector PodSelector, namespaces []string) *PortForwarder { return &PortForwarder{ Forwarder: &kubectlForwarder{}, output: out, podSelector: podSelector, + namespaces: namespaces, forwardedPods: make(map[string]*portForwardEntry), forwardedPorts: make(map[int32]string), } @@ -121,19 +129,21 @@ func (p *PortForwarder) Stop() { // Start begins a pod watcher that port forwards any pods involving containers with exposed ports. // TODO(r2d4): merge this event loop with pod watcher from log writer func (p *PortForwarder) Start(ctx context.Context) error { - watcher, err := PodWatcher() + aggregate := make(chan watch.Event) + stopWatchers, err := AggregatePodWatcher(p.namespaces, aggregate) if err != nil { + stopWatchers() return errors.Wrap(err, "initializing pod watcher") } go func() { - defer watcher.Stop() + defer stopWatchers() for { select { case <-ctx.Done(): return - case evt, ok := <-watcher.ResultChan(): + case evt, ok := <-aggregate: if !ok { return } @@ -175,42 +185,105 @@ func (p *PortForwarder) portForwardPod(ctx context.Context, pod *v1.Pod) error { for _, c := range pod.Spec.Containers { for _, port := range c.Ports { - // If the port is already port-forwarded by another container, - // continue without port-forwarding - currentApp, ok := p.forwardedPorts[port.ContainerPort] - if ok && currentApp != c.Name { - color.LightYellow.Fprintf(p.output, "Port %d for %s is already in use by container %s\n", port.ContainerPort, c.Name, currentApp) + // get current entry for this container + entry, err := p.getCurrentEntry(pod, c, port, resourceVersion) + if err != nil { + 
color.Red.Fprintf(p.output, "Unable to get port for %s, skipping port-forward: %v", c.Name, err) continue } - - entry := &portForwardEntry{ - resourceVersion: resourceVersion, - podName: pod.Name, - namespace: pod.Namespace, - containerName: c.Name, - port: port.ContainerPort, + if entry.port != entry.localPort { + color.Yellow.Fprintf(p.output, "Forwarding container %s to local port %d.\n", c.Name, entry.localPort) } - - if prevEntry, ok := p.forwardedPods[entry.key()]; ok { - // Check if this is a new generation of pod - if entry.resourceVersion > prevEntry.resourceVersion { - p.Terminate(prevEntry) - } + if err := p.forward(ctx, entry); err != nil { + return errors.Wrap(err, "failed to forward port") } + } + } + return nil +} - color.Default.Fprintln(p.output, fmt.Sprintf("Port Forwarding %s %d -> %d", entry.podName, entry.port, entry.port)) - p.forwardedPods[entry.key()] = entry - p.forwardedPorts[entry.port] = entry.containerName +func (p *PortForwarder) getCurrentEntry(pod *v1.Pod, c v1.Container, port v1.ContainerPort, resourceVersion int) (*portForwardEntry, error) { + // determine if we have seen this before + entry := &portForwardEntry{ + resourceVersion: resourceVersion, + podName: pod.Name, + namespace: pod.Namespace, + containerName: c.Name, + port: port.ContainerPort, + } + // If we have, return the current entry + oldEntry, ok := p.forwardedPods[entry.key()] + if ok { + entry.localPort = oldEntry.localPort + return entry, nil + } + // If another container isn't using this port... 
+ if _, exists := p.forwardedPorts[port.ContainerPort]; !exists { + // ...Then make sure the port is available + if available, err := isPortAvailable(port.ContainerPort, p.forwardedPorts); available && err == nil { + entry.localPort = port.ContainerPort + p.forwardedPorts[entry.localPort] = entry.key() + return entry, nil + } + } + // Else, determine a new local port + localPort, err := retrieveAvailablePort(p.forwardedPorts) + if err != nil { + return nil, errors.Wrap(err, "getting random available port") + } + entry.localPort = localPort + p.forwardedPorts[localPort] = entry.key() + return entry, nil +} - if err := p.Forward(ctx, entry); err != nil { - return errors.Wrap(err, "port forwarding failed") - } +func (p *PortForwarder) forward(ctx context.Context, entry *portForwardEntry) error { + if prevEntry, ok := p.forwardedPods[entry.key()]; ok { + // Check if this is a new generation of pod + if entry.resourceVersion > prevEntry.resourceVersion { + p.Terminate(prevEntry) } } + color.Default.Fprintln(p.output, fmt.Sprintf("Port Forwarding %s/%s %d -> %d", entry.podName, entry.containerName, entry.port, entry.localPort)) + p.forwardedPods[entry.key()] = entry + p.forwardedPorts[entry.localPort] = entry.key() + + if err := p.Forward(ctx, entry); err != nil { + return errors.Wrap(err, "port forwarding failed") + } return nil } +// From https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt, +// ports 4503-4533 are unassigned user ports; first check if any of these are available +// If not, return a random port, which hopefully won't collide with any future containers +func getAvailablePort(forwardedPorts map[int32]string) (int32, error) { + for i := 4503; i <= 4533; i++ { + ok, err := isPortAvailable(int32(i), forwardedPorts) + if ok { + return int32(i), err + } + } + + // get random port + l, err := net.Listen("tcp", ":0") + if err != nil { + return -1, err + } + return int32(l.Addr().(*net.TCPAddr).Port), l.Close() +} + +func 
portAvailable(p int32, forwardedPorts map[int32]string) (bool, error) { + if _, ok := forwardedPorts[p]; ok { + return false, nil + } + l, err := net.Listen("tcp", fmt.Sprintf(":%d", p)) + if l != nil { + defer l.Close() + } + return err == nil, nil +} + // Key is an identifier for the lock on a port during the skaffold dev cycle. func (p *portForwardEntry) key() string { return fmt.Sprintf("%s-%d", p.containerName, p.port) diff --git a/pkg/skaffold/kubernetes/port_forward_test.go b/pkg/skaffold/kubernetes/port_forward_test.go index 4a1d4dba7b6..2eec3b6ad9f 100644 --- a/pkg/skaffold/kubernetes/port_forward_test.go +++ b/pkg/skaffold/kubernetes/port_forward_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ type testForwarder struct { func (f *testForwarder) Forward(ctx context.Context, pfe *portForwardEntry) error { f.forwardedEntries[pfe.key()] = pfe - f.forwardedPorts[pfe.port] = true + f.forwardedPorts[pfe.localPort] = true return f.forwardErr } @@ -46,6 +46,36 @@ func (f *testForwarder) Terminate(pfe *portForwardEntry) { delete(f.forwardedPorts, pfe.port) } +func mockRetrieveAvailablePort(taken map[int32]struct{}, availablePorts []int32) func(map[int32]string) (int32, error) { + // Return first available port in ports that isn't taken + return func(forwardedPorts map[int32]string) (int32, error) { + for _, p := range availablePorts { + if _, ok := taken[p]; ok { + continue + } + taken[p] = struct{}{} + return p, nil + } + return -1, nil + } +} + +func mockIsPortAvailable(taken map[int32]struct{}, availablePorts []int32) func(int32, map[int32]string) (bool, error) { + // Return true if p is in availablePorts and is not in taken + return func(p int32, forwardedPorts map[int32]string) (bool, error) { + if _, ok := taken[p]; ok { + return false, nil + } + for _, port := 
range availablePorts { + if p == port { + taken[p] = struct{}{} + return true, nil + } + } + return false, nil + } +} + func newTestForwarder(forwardErr error) *testForwarder { return &testForwarder{ forwardedEntries: map[string]*portForwardEntry{}, @@ -61,6 +91,7 @@ func TestPortForwardPod(t *testing.T) { forwarder *testForwarder expectedPorts map[int32]bool expectedEntries map[string]*portForwardEntry + availablePorts []int32 shouldErr bool }{ { @@ -68,14 +99,52 @@ func TestPortForwardPod(t *testing.T) { expectedPorts: map[int32]bool{ 8080: true, }, + availablePorts: []int32{8080}, + expectedEntries: map[string]*portForwardEntry{ + "containername-8080": { + resourceVersion: 1, + podName: "podname", + containerName: "containername", + port: 8080, + localPort: 8080, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "podname", + ResourceVersion: "1", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "containername", + Ports: []v1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + }, + }, + }, + }, + }, + }, + { + description: "unavailable container port", + expectedPorts: map[int32]bool{ + 9000: true, + }, expectedEntries: map[string]*portForwardEntry{ "containername-8080": { resourceVersion: 1, podName: "podname", containerName: "containername", port: 8080, + localPort: 9000, }, }, + availablePorts: []int32{9000}, pods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -102,6 +171,7 @@ func TestPortForwardPod(t *testing.T) { expectedPorts: map[int32]bool{}, shouldErr: true, expectedEntries: map[string]*portForwardEntry{}, + availablePorts: []int32{8080}, pods: []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -128,14 +198,16 @@ func TestPortForwardPod(t *testing.T) { expectedPorts: map[int32]bool{ 8080: true, }, - forwarder: newTestForwarder(fmt.Errorf("")), - shouldErr: true, + forwarder: newTestForwarder(fmt.Errorf("")), + shouldErr: true, + availablePorts: []int32{8080}, expectedEntries: map[string]*portForwardEntry{ 
"containername-8080": { resourceVersion: 1, podName: "podname", containerName: "containername", port: 8080, + localPort: 8080, }, }, pods: []*v1.Pod{ @@ -165,18 +237,21 @@ func TestPortForwardPod(t *testing.T) { 8080: true, 50051: true, }, + availablePorts: []int32{8080, 50051}, expectedEntries: map[string]*portForwardEntry{ "containername-8080": { resourceVersion: 1, podName: "podname", containerName: "containername", port: 8080, + localPort: 8080, }, "containername2-50051": { resourceVersion: 1, podName: "podname2", containerName: "containername2", port: 50051, + localPort: 50051, }, }, pods: []*v1.Pod{ @@ -222,13 +297,23 @@ func TestPortForwardPod(t *testing.T) { description: "two same container ports", expectedPorts: map[int32]bool{ 8080: true, + 9000: true, }, + availablePorts: []int32{8080, 9000}, expectedEntries: map[string]*portForwardEntry{ "containername-8080": { resourceVersion: 1, podName: "podname", containerName: "containername", port: 8080, + localPort: 8080, + }, + "containername2-8080": { + resourceVersion: 1, + podName: "podname2", + containerName: "containername2", + port: 8080, + localPort: 9000, }, }, pods: []*v1.Pod{ @@ -275,12 +360,14 @@ func TestPortForwardPod(t *testing.T) { expectedPorts: map[int32]bool{ 8080: true, }, + availablePorts: []int32{8080}, expectedEntries: map[string]*portForwardEntry{ "containername-8080": { resourceVersion: 2, podName: "podname", containerName: "containername", port: 8080, + localPort: 8080, }, }, pods: []*v1.Pod{ @@ -326,7 +413,21 @@ func TestPortForwardPod(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - p := NewPortForwarder(ioutil.Discard, NewImageList()) + + taken := map[int32]struct{}{} + originalGetAvailablePort := getAvailablePort + retrieveAvailablePort = mockRetrieveAvailablePort(taken, test.availablePorts) + defer func() { + retrieveAvailablePort = originalGetAvailablePort + }() + + originalIsPortAvailable := isPortAvailable + isPortAvailable = 
mockIsPortAvailable(taken, test.availablePorts) + defer func() { + isPortAvailable = originalIsPortAvailable + }() + + p := NewPortForwarder(ioutil.Discard, NewImageList(), []string{""}) if test.forwarder == nil { test.forwarder = newTestForwarder(nil) } diff --git a/pkg/skaffold/kubernetes/wait.go b/pkg/skaffold/kubernetes/wait.go index a8f03b89376..ab05d1043c2 100644 --- a/pkg/skaffold/kubernetes/wait.go +++ b/pkg/skaffold/kubernetes/wait.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/wait_test.go b/pkg/skaffold/kubernetes/wait_test.go index 1713ac2ef4d..b74a2afbea8 100644 --- a/pkg/skaffold/kubernetes/wait_test.go +++ b/pkg/skaffold/kubernetes/wait_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/kubernetes/watcher.go b/pkg/skaffold/kubernetes/watcher.go index 617054b3fb6..2c2dff0e7ee 100644 --- a/pkg/skaffold/kubernetes/watcher.go +++ b/pkg/skaffold/kubernetes/watcher.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,15 +23,39 @@ import ( ) // PodWatcher returns a watcher that will report on all Pod Events (additions, modifications, etc.) 
-func PodWatcher() (watch.Interface, error) { +func PodWatcher(namespace string) (watch.Interface, error) { kubeclient, err := Client() if err != nil { return nil, errors.Wrap(err, "getting k8s client") } client := kubeclient.CoreV1() var forever int64 = 3600 * 24 * 365 * 100 - return client.Pods("").Watch(meta_v1.ListOptions{ + return client.Pods(namespace).Watch(meta_v1.ListOptions{ IncludeUninitialized: true, TimeoutSeconds: &forever, }) } + +// AggregatePodWatcher returns a watcher for multiple namespaces. +func AggregatePodWatcher(namespaces []string, aggregate chan watch.Event) (func(), error) { + watchers := make([]watch.Interface, 0, len(namespaces)) + stopWatchers := func() { + for _, w := range watchers { + w.Stop() + } + } + + for _, ns := range namespaces { + watcher, err := PodWatcher(ns) + if err != nil { + return stopWatchers, errors.Wrap(err, "initializing pod watcher for "+ns) + } + watchers = append(watchers, watcher) + go func(w watch.Interface) { + for msg := range w.ResultChan() { + aggregate <- msg + } + }(watcher) + } + return stopWatchers, nil +} diff --git a/pkg/skaffold/plugin/builders/bazel/bazel.go b/pkg/skaffold/plugin/builders/bazel/bazel.go new file mode 100644 index 00000000000..5c718bf8d36 --- /dev/null +++ b/pkg/skaffold/plugin/builders/bazel/bazel.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bazel + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/bazel" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/shared" + plugin "github.com/hashicorp/go-plugin" +) + +// Execute an image build with docker +func Execute() error { + // pluginMap is the map of plugins we can dispense. + var pluginMap = map[string]plugin.Plugin{ + "bazel": &shared.BuilderPlugin{Impl: bazel.NewBuilder()}, + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: shared.Handshake, + Plugins: pluginMap, + }) + return nil +} diff --git a/pkg/skaffold/plugin/builders/docker/docker.go b/pkg/skaffold/plugin/builders/docker/docker.go new file mode 100644 index 00000000000..ee86fb2b5c3 --- /dev/null +++ b/pkg/skaffold/plugin/builders/docker/docker.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/docker" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/shared" + plugin "github.com/hashicorp/go-plugin" +) + +// Execute an image build with docker +func Execute() error { + // pluginMap is the map of plugins we can dispense. 
+ var pluginMap = map[string]plugin.Plugin{ + "docker": &shared.BuilderPlugin{Impl: docker.NewBuilder()}, + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: shared.Handshake, + Plugins: pluginMap, + }) + return nil +} diff --git a/pkg/skaffold/build/gcb/cloud_build.go b/pkg/skaffold/plugin/environments/gcb/cloud_build.go similarity index 88% rename from pkg/skaffold/build/gcb/cloud_build.go rename to pkg/skaffold/plugin/environments/gcb/cloud_build.go index f4e27e29c25..d8b900984f5 100644 --- a/pkg/skaffold/build/gcb/cloud_build.go +++ b/pkg/skaffold/plugin/environments/gcb/cloud_build.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,9 +29,9 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/gcp" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sources" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/version" "github.com/pkg/errors" @@ -43,11 +43,11 @@ import ( ) // Build builds a list of artifacts with Google Cloud Build. 
-func (b *Builder) Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]build.Artifact, error) { - return build.InParallel(ctx, out, tagger, artifacts, b.buildArtifactWithCloudBuild) +func (b *Builder) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + return build.InParallel(ctx, out, tags, artifacts, b.buildArtifactWithCloudBuild) } -func (b *Builder) buildArtifactWithCloudBuild(ctx context.Context, out io.Writer, tagger tag.Tagger, artifact *latest.Artifact) (string, error) { +func (b *Builder) buildArtifactWithCloudBuild(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string) (string, error) { client, err := google.DefaultClient(ctx, cloudbuild.CloudPlatformScope) if err != nil { return "", errors.Wrap(err, "getting google client") @@ -85,13 +85,18 @@ func (b *Builder) buildArtifactWithCloudBuild(ctx context.Context, out io.Writer return "", errors.Wrap(err, "checking bucket is in correct project") } - desc, err := b.buildDescription(artifact, cbBucket, buildObject) + desc, err := b.buildDescription(artifact, tag, cbBucket, buildObject) if err != nil { return "", errors.Wrap(err, "could not create build description") } + dependencies, err := b.DependenciesForArtifact(ctx, artifact) + if err != nil { + return "", errors.Wrapf(err, "getting dependencies for %s", artifact.ImageName) + } + color.Default.Fprintf(out, "Pushing code to gs://%s/%s\n", cbBucket, buildObject) - if err := docker.UploadContextToGCS(ctx, artifact.Workspace, artifact.DockerArtifact, cbBucket, buildObject); err != nil { + if err := sources.UploadToGCS(ctx, artifact, cbBucket, buildObject, dependencies); err != nil { return "", errors.Wrap(err, "uploading source tarball") } @@ -151,22 +156,8 @@ watch: return "", errors.Wrap(err, "cleaning up source tar after build") } logrus.Infof("Deleted object %s", buildObject) - builtTag := fmt.Sprintf("%s@%s", 
artifact.ImageName, digest) - logrus.Infof("Image built at %s", builtTag) - - newTag, err := tagger.GenerateFullyQualifiedImageName(artifact.Workspace, tag.Options{ - ImageName: artifact.ImageName, - Digest: digest, - }) - if err != nil { - return "", errors.Wrap(err, "generating tag") - } - - if err := docker.AddTag(builtTag, newTag); err != nil { - return "", errors.Wrap(err, "tagging image") - } - return newTag, nil + return tag + "@" + digest, nil } func getBuildID(op *cloudbuild.Operation) (string, error) { diff --git a/pkg/skaffold/plugin/environments/gcb/desc.go b/pkg/skaffold/plugin/environments/gcb/desc.go new file mode 100644 index 00000000000..be9878c10de --- /dev/null +++ b/pkg/skaffold/plugin/environments/gcb/desc.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcb + +import ( + "fmt" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/defaults" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/pkg/errors" + cloudbuild "google.golang.org/api/cloudbuild/v1" + yaml "gopkg.in/yaml.v2" +) + +func (b *Builder) buildDescription(artifact *latest.Artifact, tag, bucket, object string) (*cloudbuild.Build, error) { + steps, err := b.buildSteps(artifact, tag) + if err != nil { + return nil, err + } + + return &cloudbuild.Build{ + LogsBucket: bucket, + Source: &cloudbuild.Source{ + StorageSource: &cloudbuild.StorageSource{ + Bucket: bucket, + Object: object, + }, + }, + Steps: steps, + Images: []string{tag}, + Options: &cloudbuild.BuildOptions{ + DiskSizeGb: b.DiskSizeGb, + MachineType: b.MachineType, + }, + Timeout: b.Timeout, + }, nil +} + +func (b *Builder) buildSteps(artifact *latest.Artifact, tag string) ([]*cloudbuild.BuildStep, error) { + switch { + case artifact.BuilderPlugin != nil: + return b.pluginBuildSteps(artifact, tag) + case artifact.DockerArtifact != nil: + return b.dockerBuildSteps(artifact.DockerArtifact, tag), nil + + case artifact.BazelArtifact != nil: + return nil, errors.New("skaffold can't build a bazel artifact with Google Cloud Build") + + case artifact.JibMavenArtifact != nil: + return b.jibMavenBuildSteps(artifact.JibMavenArtifact, tag), nil + + case artifact.JibGradleArtifact != nil: + return b.jibGradleBuildSteps(artifact.JibGradleArtifact, tag), nil + + default: + return nil, fmt.Errorf("undefined artifact type: %+v", artifact.ArtifactType) + } +} + +func (b *Builder) pluginBuildSteps(artifact *latest.Artifact, tag string) ([]*cloudbuild.BuildStep, error) { + switch artifact.BuilderPlugin.Name { + case constants.DockerBuilderPluginName: + var da *latest.DockerArtifact + if err := yaml.Unmarshal(artifact.BuilderPlugin.Contents, &da); err != nil { + return nil, 
errors.Wrap(err, "getting docker artifact details") + } + if da == nil { + da = &latest.DockerArtifact{} + } + defaults.SetDefaultDockerArtifact(da) + return b.dockerBuildSteps(da, tag), nil + default: + return nil, errors.Errorf("the '%s' builder is not supported", artifact.BuilderPlugin.Name) + } +} diff --git a/pkg/skaffold/build/gcb/desc_test.go b/pkg/skaffold/plugin/environments/gcb/desc_test.go similarity index 54% rename from pkg/skaffold/build/gcb/desc_test.go rename to pkg/skaffold/plugin/environments/gcb/desc_test.go index 5b6e4064613..94ae13d8b24 100644 --- a/pkg/skaffold/build/gcb/desc_test.go +++ b/pkg/skaffold/plugin/environments/gcb/desc_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,36 +23,6 @@ import ( "github.com/GoogleContainerTools/skaffold/testutil" ) -func TestBuildJibMavenDescriptionFail(t *testing.T) { - artifact := &latest.Artifact{ - ArtifactType: latest.ArtifactType{ - JibMavenArtifact: &latest.JibMavenArtifact{}, - }, - } - - builder := Builder{ - GoogleCloudBuild: &latest.GoogleCloudBuild{}, - } - _, err := builder.buildDescription(artifact, "bucket", "object") - - testutil.CheckError(t, true, err) -} - -func TestBuildJibGradleDescriptionFail(t *testing.T) { - artifact := &latest.Artifact{ - ArtifactType: latest.ArtifactType{ - JibGradleArtifact: &latest.JibGradleArtifact{}, - }, - } - - builder := Builder{ - GoogleCloudBuild: &latest.GoogleCloudBuild{}, - } - _, err := builder.buildDescription(artifact, "bucket", "object") - - testutil.CheckError(t, true, err) -} - func TestBuildBazelDescriptionFail(t *testing.T) { artifact := &latest.Artifact{ ArtifactType: latest.ArtifactType{ @@ -63,7 +33,7 @@ func TestBuildBazelDescriptionFail(t *testing.T) { builder := Builder{ GoogleCloudBuild: &latest.GoogleCloudBuild{}, } - _, err := 
builder.buildDescription(artifact, "bucket", "object") + _, err := builder.buildDescription(artifact, "tag", "bucket", "object") testutil.CheckError(t, true, err) } diff --git a/pkg/skaffold/build/gcb/docker.go b/pkg/skaffold/plugin/environments/gcb/docker.go similarity index 74% rename from pkg/skaffold/build/gcb/docker.go rename to pkg/skaffold/plugin/environments/gcb/docker.go index ab75015ec4b..b7dadd39337 100644 --- a/pkg/skaffold/build/gcb/docker.go +++ b/pkg/skaffold/plugin/environments/gcb/docker.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,22 +17,25 @@ limitations under the License. package gcb import ( + "fmt" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" cloudbuild "google.golang.org/api/cloudbuild/v1" ) -func (b *Builder) dockerBuildSteps(imageName string, artifact *latest.DockerArtifact) []*cloudbuild.BuildStep { +func (b *Builder) dockerBuildSteps(artifact *latest.DockerArtifact, tag string) []*cloudbuild.BuildStep { var steps []*cloudbuild.BuildStep for _, cacheFrom := range artifact.CacheFrom { steps = append(steps, &cloudbuild.BuildStep{ - Name: b.DockerImage, - Args: []string{"pull", cacheFrom}, + Name: b.DockerImage, + Entrypoint: "sh", + Args: []string{"-c", fmt.Sprintf("docker pull %s || true", cacheFrom)}, }) } - args := append([]string{"build", "--tag", imageName, "-f", artifact.DockerfilePath}) + args := append([]string{"build", "--tag", tag, "-f", artifact.DockerfilePath}) args = append(args, docker.GetBuildArgs(artifact)...) 
args = append(args, ".") diff --git a/pkg/skaffold/build/gcb/docker_test.go b/pkg/skaffold/plugin/environments/gcb/docker_test.go similarity index 84% rename from pkg/skaffold/build/gcb/docker_test.go rename to pkg/skaffold/plugin/environments/gcb/docker_test.go index 8f6c2e3675a..10db31f558c 100644 --- a/pkg/skaffold/build/gcb/docker_test.go +++ b/pkg/skaffold/plugin/environments/gcb/docker_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,7 +27,6 @@ import ( func TestDockerBuildDescription(t *testing.T) { artifact := &latest.Artifact{ - ImageName: "nginx", ArtifactType: latest.ArtifactType{ DockerArtifact: &latest.DockerArtifact{ DockerfilePath: "Dockerfile", @@ -47,7 +46,7 @@ func TestDockerBuildDescription(t *testing.T) { Timeout: "10m", }, } - desc, err := builder.buildDescription(artifact, "bucket", "object") + desc, err := builder.buildDescription(artifact, "nginx", "bucket", "object") expected := cloudbuild.Build{ LogsBucket: "bucket", @@ -61,7 +60,7 @@ func TestDockerBuildDescription(t *testing.T) { Name: "docker/docker", Args: []string{"build", "--tag", "nginx", "-f", "Dockerfile", "--build-arg", "arg1=value1", "--build-arg", "arg2", "."}, }}, - Images: []string{artifact.ImageName}, + Images: []string{"nginx"}, Options: &cloudbuild.BuildOptions{ DiskSizeGb: 100, MachineType: "n1-standard-1", @@ -83,14 +82,16 @@ func TestPullCacheFrom(t *testing.T) { DockerImage: "docker/docker", }, } - steps := builder.dockerBuildSteps("nginx2", artifact) + steps := builder.dockerBuildSteps(artifact, "nginx2") expected := []*cloudbuild.BuildStep{{ - Name: "docker/docker", - Args: []string{"pull", "from/image1"}, + Name: "docker/docker", + Entrypoint: "sh", + Args: []string{"-c", "docker pull from/image1 || true"}, }, { - Name: "docker/docker", - Args: []string{"pull", "from/image2"}, + 
Name: "docker/docker", + Entrypoint: "sh", + Args: []string{"-c", "docker pull from/image2 || true"}, }, { Name: "docker/docker", Args: []string{"build", "--tag", "nginx2", "-f", "Dockerfile", "--cache-from", "from/image1", "--cache-from", "from/image2", "."}, diff --git a/pkg/skaffold/plugin/environments/gcb/jib.go b/pkg/skaffold/plugin/environments/gcb/jib.go new file mode 100644 index 00000000000..9ac84d9f7d6 --- /dev/null +++ b/pkg/skaffold/plugin/environments/gcb/jib.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcb + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/jib" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +// TODO(dgageot): check that `package` is bound to `jib:build` +func (b *Builder) jibMavenBuildSteps(artifact *latest.JibMavenArtifact, tag string) []*cloudbuild.BuildStep { + return []*cloudbuild.BuildStep{{ + Name: b.MavenImage, + Args: jib.GenerateMavenArgs("dockerBuild", tag, artifact, b.skipTests), + }} +} + +func (b *Builder) jibGradleBuildSteps(artifact *latest.JibGradleArtifact, tag string) []*cloudbuild.BuildStep { + return []*cloudbuild.BuildStep{{ + Name: b.GradleImage, + Args: jib.GenerateGradleArgs("jibDockerBuild", tag, artifact, b.skipTests), + }} +} diff --git a/pkg/skaffold/plugin/environments/gcb/jib_test.go b/pkg/skaffold/plugin/environments/gcb/jib_test.go new file mode 100644 index 00000000000..41c0e4db467 --- /dev/null +++ b/pkg/skaffold/plugin/environments/gcb/jib_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcb + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +func TestJibMavenBuildSteps(t *testing.T) { + var testCases = []struct { + skipTests bool + args []string + }{ + {false, []string{"--non-recursive", "prepare-package", "jib:dockerBuild", "-Dimage=img"}}, + {true, []string{"--non-recursive", "-DskipTests=true", "prepare-package", "jib:dockerBuild", "-Dimage=img"}}, + } + for _, tt := range testCases { + artifact := &latest.JibMavenArtifact{} + + builder := Builder{ + GoogleCloudBuild: &latest.GoogleCloudBuild{ + MavenImage: "maven:3.6.0", + }, + skipTests: tt.skipTests, + } + steps := builder.jibMavenBuildSteps(artifact, "img") + + expected := []*cloudbuild.BuildStep{{ + Name: "maven:3.6.0", + Args: tt.args, + }} + + testutil.CheckDeepEqual(t, expected, steps) + } +} + +func TestJibGradleBuildSteps(t *testing.T) { + var testCases = []struct { + skipTests bool + args []string + }{ + {false, []string{":jibDockerBuild", "--image=img"}}, + {true, []string{":jibDockerBuild", "--image=img", "-x", "test"}}, + } + for _, tt := range testCases { + artifact := &latest.JibGradleArtifact{} + + builder := Builder{ + GoogleCloudBuild: &latest.GoogleCloudBuild{ + GradleImage: "gradle:5.1.1", + }, + skipTests: tt.skipTests, + } + steps := builder.jibGradleBuildSteps(artifact, "img") + + expected := []*cloudbuild.BuildStep{{ + Name: "gradle:5.1.1", + Args: tt.args, + }} + + testutil.CheckDeepEqual(t, expected, steps) + } +} diff --git a/pkg/skaffold/build/gcb/types.go b/pkg/skaffold/plugin/environments/gcb/types.go similarity index 73% rename from pkg/skaffold/build/gcb/types.go rename to pkg/skaffold/plugin/environments/gcb/types.go index dd45be8f490..05618771a6d 100644 --- a/pkg/skaffold/build/gcb/types.go +++ b/pkg/skaffold/plugin/environments/gcb/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold 
Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,14 @@ limitations under the License. package gcb import ( + "context" "time" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/pkg/errors" ) const ( @@ -55,12 +59,14 @@ const ( // Builder builds artifacts with Google Cloud Build. type Builder struct { *latest.GoogleCloudBuild + skipTests bool } // NewBuilder creates a new Builder that builds artifacts with Google Cloud Build. -func NewBuilder(cfg *latest.GoogleCloudBuild) *Builder { +func NewBuilder(cfg *latest.GoogleCloudBuild, skipTests bool) *Builder { return &Builder{ GoogleCloudBuild: cfg, + skipTests: skipTests, } } @@ -70,3 +76,12 @@ func (b *Builder) Labels() map[string]string { constants.Labels.Builder: "google-cloud-build", } } + +// DependenciesForArtifact returns the Dockerfile dependencies for this gcb artifact +func (b *Builder) DependenciesForArtifact(ctx context.Context, a *latest.Artifact) ([]string, error) { + paths, err := docker.GetDependencies(ctx, a.Workspace, a.DockerArtifact) + if err != nil { + return nil, errors.Wrapf(err, "getting dependencies for %s", a.ImageName) + } + return util.AbsolutePaths(a.Workspace, paths), nil +} diff --git a/pkg/skaffold/plugin/shared/build.go b/pkg/skaffold/plugin/shared/build.go new file mode 100644 index 00000000000..9f9b52a40aa --- /dev/null +++ b/pkg/skaffold/plugin/shared/build.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shared + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" +) + +type PluginBuilder interface { + Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) + build.Builder +} diff --git a/pkg/skaffold/plugin/shared/server.go b/pkg/skaffold/plugin/shared/server.go new file mode 100644 index 00000000000..ac8e517b603 --- /dev/null +++ b/pkg/skaffold/plugin/shared/server.go @@ -0,0 +1,166 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package shared + +import ( + "context" + "io" + "net/rpc" + "os" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + plugin "github.com/hashicorp/go-plugin" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + yaml "gopkg.in/yaml.v2" +) + +// BuilderRPC is an implementation of an rpc client +type BuilderRPC struct { + client *rpc.Client +} + +func (b *BuilderRPC) Init(opts *config.SkaffoldOptions, env *latest.ExecutionEnvironment) { + // We don't expect a response, so we can just use interface{} + var resp interface{} + args := InitArgs{ + Opts: opts, + Env: env, + } + b.client.Call("Plugin.Init", args, &resp) +} + +func (b *BuilderRPC) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + var resp []string + if err := convertPropertiesToBytes([]*latest.Artifact{artifact}); err != nil { + return nil, errors.Wrapf(err, "converting properties to bytes") + } + args := DependencyArgs{artifact} + err := b.client.Call("Plugin.DependenciesForArtifact", args, &resp) + if err != nil { + return nil, err + } + return resp, nil +} + +func (b *BuilderRPC) Labels() map[string]string { + var resp map[string]string + err := b.client.Call("Plugin.Labels", new(interface{}), &resp) + if err != nil { + // Can't return error, so log it instead + logrus.Errorf("Unable to get labels from server: %v", err) + } + return resp +} + +func (b *BuilderRPC) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { + var resp []build.Artifact + if err := convertPropertiesToBytes(artifacts); err != nil { + return nil, errors.Wrapf(err, "converting properties to bytes") + } + args := BuildArgs{ + ImageTags: tags, + Artifacts: artifacts, + } + err := 
b.client.Call("Plugin.Build", args, &resp) + if err != nil { + return nil, err + } + return resp, nil +} + +func convertPropertiesToBytes(artifacts []*latest.Artifact) error { + for _, a := range artifacts { + if a.BuilderPlugin.Properties == nil { + continue + } + data, err := yaml.Marshal(a.BuilderPlugin.Properties) + if err != nil { + return err + } + a.BuilderPlugin.Contents = data + a.BuilderPlugin.Properties = nil + } + return nil +} + +// BuilderRPCServer is the RPC server that BuilderRPC talks to, conforming to +// the requirements of net/rpc +type BuilderRPCServer struct { + Impl PluginBuilder +} + +func (s *BuilderRPCServer) Init(args InitArgs, resp *interface{}) error { + s.Impl.Init(args.Opts, args.Env) + return nil +} + +func (s *BuilderRPCServer) Labels(args interface{}, resp *map[string]string) error { + *resp = s.Impl.Labels() + return nil +} + +func (s *BuilderRPCServer) Build(b BuildArgs, resp *[]build.Artifact) error { + artifacts, err := s.Impl.Build(context.Background(), os.Stdout, b.ImageTags, b.Artifacts) + if err != nil { + return errors.Wrap(err, "building artifacts") + } + *resp = artifacts + return nil +} + +func (s *BuilderRPCServer) DependenciesForArtifact(d DependencyArgs, resp *[]string) error { + dependencies, err := s.Impl.DependenciesForArtifact(context.Background(), d.Artifact) + if err != nil { + return errors.Wrapf(err, "getting dependencies for %s", d.Artifact.ImageName) + } + *resp = dependencies + return nil +} + +// DependencyArgs are args passed via rpc to the build plugin on DependencyForArtifact() +type DependencyArgs struct { + *latest.Artifact +} + +// InitArgs are args passed via rpc to the builder plugin on Init() +type InitArgs struct { + Opts *config.SkaffoldOptions + Env *latest.ExecutionEnvironment +} + +// BuildArgs are the args passed via rpc to the builder plugin on Build() +type BuildArgs struct { + tag.ImageTags + Artifacts []*latest.Artifact +} + +// BuilderPlugin is the implementation of the hashicorp 
plugin.Plugin interface +type BuilderPlugin struct { + Impl PluginBuilder +} + +func (p *BuilderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return &BuilderRPCServer{Impl: p.Impl}, nil +} + +func (BuilderPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &BuilderRPC{client: c}, nil +} diff --git a/pkg/skaffold/plugin/shared/shared.go b/pkg/skaffold/plugin/shared/shared.go new file mode 100644 index 00000000000..39b0313408a --- /dev/null +++ b/pkg/skaffold/plugin/shared/shared.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shared + +import ( + plugin "github.com/hashicorp/go-plugin" +) + +// Handshake is a common handshake that is shared by plugin and host. +var Handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + + MagicCookieKey: "SKAFFOLD_BUILDER_PLUGIN", + MagicCookieValue: "hello", +} + +// PluginMap is a map of all accepted plugins +var PluginMap = map[string]plugin.Plugin{ + "docker": &BuilderPlugin{}, + "bazel": &BuilderPlugin{}, +} diff --git a/pkg/skaffold/runner/changes.go b/pkg/skaffold/runner/changes.go index d62bf6f2347..8581f9ace89 100644 --- a/pkg/skaffold/runner/changes.go +++ b/pkg/skaffold/runner/changes.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/runner/deps.go b/pkg/skaffold/runner/deps.go deleted file mode 100644 index ee1a0a62d75..00000000000 --- a/pkg/skaffold/runner/deps.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 The Skaffold Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runner - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/bazel" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/jib" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" -) - -// DependenciesForArtifact lists the dependencies for a given artifact. 
-func DependenciesForArtifact(ctx context.Context, a *latest.Artifact) ([]string, error) { - var ( - paths []string - err error - ) - - switch { - case a.DockerArtifact != nil: - paths, err = docker.GetDependencies(ctx, a.Workspace, a.DockerArtifact) - - case a.BazelArtifact != nil: - paths, err = bazel.GetDependencies(ctx, a.Workspace, a.BazelArtifact) - - case a.JibMavenArtifact != nil: - paths, err = jib.GetDependenciesMaven(ctx, a.Workspace, a.JibMavenArtifact) - - case a.JibGradleArtifact != nil: - paths, err = jib.GetDependenciesGradle(ctx, a.Workspace, a.JibGradleArtifact) - - default: - return nil, fmt.Errorf("undefined artifact type: %+v", a.ArtifactType) - } - - if err != nil { - // if the context was cancelled act as if all is well - // TODO(dgageot): this should be even higher in the call chain. - if ctx.Err() == context.Canceled { - logrus.Debugln(errors.Wrap(err, "ignore error since context is cancelled")) - return nil, nil - } - - return nil, err - } - - var p []string - for _, path := range paths { - // TODO(dgageot): this is only done for jib builder. - if !filepath.IsAbs(path) { - path = filepath.Join(a.Workspace, path) - } - p = append(p, path) - } - return p, nil -} diff --git a/pkg/skaffold/runner/dev.go b/pkg/skaffold/runner/dev.go index d12b6050851..8e07b30f795 100644 --- a/pkg/skaffold/runner/dev.go +++ b/pkg/skaffold/runner/dev.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,12 +18,12 @@ package runner import ( "context" - "io" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sync" @@ -35,11 +35,11 @@ var ErrorConfigurationChanged = errors.New("configuration changed") // Dev watches for changes and runs the skaffold build and deploy // pipeline until interrupted by the user. -func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*latest.Artifact) error { - logger := r.newLogger(out, artifacts) +func (r *SkaffoldRunner) Dev(ctx context.Context, output *config.Output, artifacts []*latest.Artifact) error { + logger := r.newLogger(output.Logs, artifacts) defer logger.Stop() - portForwarder := kubernetes.NewPortForwarder(out, r.imageList) + portForwarder := kubernetes.NewPortForwarder(output.Main, r.imageList, r.namespaces) defer portForwarder.Stop() // Create watcher and register artifacts to build current state of files. 
@@ -66,7 +66,7 @@ func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*la return ErrorConfigurationChanged case len(changed.needsResync) > 0: for _, s := range changed.needsResync { - color.Default.Fprintf(out, "Syncing %d files for %s\n", len(s.Copy)+len(s.Delete), s.Image) + color.Default.Fprintf(output.Main, "Syncing %d files for %s\n", len(s.Copy)+len(s.Delete), s.Image) if err := r.Syncer.Sync(ctx, s); err != nil { logrus.Warnln("Skipping deploy due to sync error:", err) @@ -74,12 +74,12 @@ func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*la } } case len(changed.needsRebuild) > 0: - if err := r.buildTestDeploy(ctx, out, changed.needsRebuild); err != nil { + if err := r.buildTestDeploy(ctx, output.Main, changed.needsRebuild); err != nil { logrus.Warnln("Skipping deploy due to error:", err) return nil } case changed.needsRedeploy: - if _, err := r.Deploy(ctx, out, r.builds); err != nil { + if err := r.Deploy(ctx, output.Main, r.builds); err != nil { logrus.Warnln("Skipping deploy due to error:", err) return nil } @@ -93,12 +93,12 @@ func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*la for i := range artifacts { artifact := artifacts[i] - if !r.shouldWatch(artifact) { + if !r.IsTargetImage(artifact) { continue } if err := r.Watcher.Register( - func() ([]string, error) { return DependenciesForArtifact(ctx, artifact) }, + func() ([]string, error) { return r.Builder.DependenciesForArtifact(ctx, artifact) }, func(e watch.Events) { changed.AddDirtyArtifact(artifact, e) }, ); err != nil { return errors.Wrapf(err, "watching files for artifact %s", artifact.ImageName) @@ -130,7 +130,7 @@ func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*la } // First run - if err := r.buildTestDeploy(ctx, out, artifacts); err != nil { + if err := r.buildTestDeploy(ctx, output.Main, artifacts); err != nil { return errors.Wrap(err, "exiting dev mode because first run failed") } @@ 
-147,5 +147,5 @@ func (r *SkaffoldRunner) Dev(ctx context.Context, out io.Writer, artifacts []*la } } - return r.Watcher.Run(ctx, out, onChange) + return r.Watcher.Run(ctx, output.Main, onChange) } diff --git a/pkg/skaffold/runner/dev_test.go b/pkg/skaffold/runner/dev_test.go index 13ea808ab4d..673cd4e7e83 100644 --- a/pkg/skaffold/runner/dev_test.go +++ b/pkg/skaffold/runner/dev_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ import ( "io/ioutil" "testing" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/watch" "github.com/GoogleContainerTools/skaffold/testutil" @@ -82,6 +83,13 @@ func (t *TestWatcher) Run(ctx context.Context, out io.Writer, onChange func() er return nil } +func discardOutput() *config.Output { + return &config.Output{ + Main: ioutil.Discard, + Logs: ioutil.Discard, + } +} + func TestDevFailFirstCycle(t *testing.T) { var tests = []struct { description string @@ -129,7 +137,7 @@ func TestDevFailFirstCycle(t *testing.T) { runner := createRunner(t, test.testBench) runner.Watcher = test.watcher - err := runner.Dev(context.Background(), ioutil.Discard, []*latest.Artifact{{ + err := runner.Dev(context.Background(), discardOutput(), []*latest.Artifact{{ ImageName: "img", }}) @@ -260,7 +268,7 @@ func TestDev(t *testing.T) { testBench: test.testBench, } - err := runner.Dev(context.Background(), ioutil.Discard, []*latest.Artifact{ + err := runner.Dev(context.Background(), discardOutput(), []*latest.Artifact{ {ImageName: "img1"}, {ImageName: "img2"}, }) @@ -325,7 +333,7 @@ func TestDevSync(t *testing.T) { testBench: test.testBench, } - err := runner.Dev(context.Background(), ioutil.Discard, []*latest.Artifact{ + err := 
runner.Dev(context.Background(), discardOutput(), []*latest.Artifact{ { ImageName: "img1", Sync: map[string]string{ diff --git a/pkg/skaffold/runner/filters.go b/pkg/skaffold/runner/filters.go index 76a71862304..5bdd3d8431a 100644 --- a/pkg/skaffold/runner/filters.go +++ b/pkg/skaffold/runner/filters.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,13 +22,13 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" ) -func (r *SkaffoldRunner) shouldWatch(artifact *latest.Artifact) bool { - if len(r.opts.Watch) == 0 { +func (r *SkaffoldRunner) IsTargetImage(artifact *latest.Artifact) bool { + if len(r.opts.TargetImages) == 0 { return true } - for _, watchExpression := range r.opts.Watch { - if strings.Contains(artifact.ImageName, watchExpression) { + for _, targetImage := range r.opts.TargetImages { + if strings.Contains(artifact.ImageName, targetImage) { return true } } diff --git a/pkg/skaffold/runner/filters_test.go b/pkg/skaffold/runner/filters_test.go index 27f1d7d4dd8..98a74cb3239 100644 --- a/pkg/skaffold/runner/filters_test.go +++ b/pkg/skaffold/runner/filters_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -24,35 +24,35 @@ import ( "github.com/GoogleContainerTools/skaffold/testutil" ) -func TestShouldWatch(t *testing.T) { +func TestIsTargetImage(t *testing.T) { var tests = []struct { description string - watch []string + targetImages []string expectedMatch bool }{ { description: "match all", - watch: nil, + targetImages: nil, expectedMatch: true, }, { description: "match full name", - watch: []string{"domain/image"}, + targetImages: []string{"domain/image"}, expectedMatch: true, }, { description: "match partial name", - watch: []string{"image"}, + targetImages: []string{"image"}, expectedMatch: true, }, { description: "match any", - watch: []string{"other", "image"}, + targetImages: []string{"other", "image"}, expectedMatch: true, }, { description: "no match", - watch: []string{"other"}, + targetImages: []string{"other"}, expectedMatch: false, }, } @@ -61,11 +61,11 @@ func TestShouldWatch(t *testing.T) { t.Run(test.description, func(t *testing.T) { runner := &SkaffoldRunner{ opts: &config.SkaffoldOptions{ - Watch: test.watch, + TargetImages: test.targetImages, }, } - match := runner.shouldWatch(&latest.Artifact{ + match := runner.IsTargetImage(&latest.Artifact{ ImageName: "domain/image", }) diff --git a/pkg/skaffold/runner/notification.go b/pkg/skaffold/runner/notification.go index ea1108af334..a2b5d12bdc2 100644 --- a/pkg/skaffold/runner/notification.go +++ b/pkg/skaffold/runner/notification.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -41,13 +41,12 @@ type withNotification struct { deploy.Deployer } -func (w withNotification) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact) ([]deploy.Artifact, error) { - res, err := w.Deployer.Deploy(ctx, out, builds) - if err != nil { - return nil, err +func (w withNotification) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact, labellers []deploy.Labeller) error { + if err := w.Deployer.Deploy(ctx, out, builds, labellers); err != nil { + return err } fmt.Fprint(out, terminalBell) - return res, nil + return nil } diff --git a/pkg/skaffold/runner/runner.go b/pkg/skaffold/runner/runner.go index 22f35cfe68d..1228eba40de 100644 --- a/pkg/skaffold/runner/runner.go +++ b/pkg/skaffold/runner/runner.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,14 +27,16 @@ import ( configutil "github.com/GoogleContainerTools/skaffold/cmd/skaffold/app/cmd/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/gcb" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/kaniko" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/local" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/plugin" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/build/tag" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/color" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/deploy" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes" kubectx "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/plugin/environments/gcb" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" 
"github.com/GoogleContainerTools/skaffold/pkg/skaffold/sync" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sync/kubectl" @@ -52,9 +54,11 @@ type SkaffoldRunner struct { watch.Watcher opts *config.SkaffoldOptions + labellers []deploy.Labeller builds []build.Artifact hasDeployed bool imageList *kubernetes.ImageList + namespaces []string } // NewForConfig returns a new SkaffoldRunner for a SkaffoldPipeline @@ -65,6 +69,11 @@ func NewForConfig(opts *config.SkaffoldOptions, cfg *latest.SkaffoldPipeline) (* } logrus.Infof("Using kubectl context: %s", kubeContext) + namespaces, err := getAllPodNamespaces(opts.Namespace) + if err != nil { + return nil, errors.Wrap(err, "getting namespace list") + } + defaultRepo, err := configutil.GetDefaultRepo(opts.DefaultRepo) if err != nil { return nil, errors.Wrap(err, "getting default repo") @@ -80,7 +89,7 @@ func NewForConfig(opts *config.SkaffoldOptions, cfg *latest.SkaffoldPipeline) (* return nil, errors.Wrap(err, "parsing build config") } - tester, err := getTester(&cfg.Test, opts) + tester, err := getTester(cfg.Test, opts) if err != nil { return nil, errors.Wrap(err, "parsing test config") } @@ -90,7 +99,8 @@ func NewForConfig(opts *config.SkaffoldOptions, cfg *latest.SkaffoldPipeline) (* return nil, errors.Wrap(err, "parsing deploy config") } - deployer = deploy.WithLabels(deployer, opts, builder, deployer, tagger) + labellers := []deploy.Labeller{opts, builder, deployer, tagger} + builder, tester, deployer = WithTimings(builder, tester, deployer) if opts.Notification { deployer = WithNotification(deployer) @@ -102,30 +112,35 @@ func NewForConfig(opts *config.SkaffoldOptions, cfg *latest.SkaffoldPipeline) (* } return &SkaffoldRunner{ - Builder: builder, - Tester: tester, - Deployer: deployer, - Tagger: tagger, - Syncer: &kubectl.Syncer{}, - Watcher: watch.NewWatcher(trigger), - opts: opts, - imageList: kubernetes.NewImageList(), + Builder: builder, + Tester: tester, + Deployer: deployer, + Tagger: tagger, + Syncer: 
kubectl.NewSyncer(namespaces), + Watcher: watch.NewWatcher(trigger), + opts: opts, + labellers: labellers, + imageList: kubernetes.NewImageList(), + namespaces: namespaces, }, nil } func getBuilder(cfg *latest.BuildConfig, kubeContext string, opts *config.SkaffoldOptions) (build.Builder, error) { switch { + case buildWithPlugin(cfg.Artifacts): + logrus.Debugln("Using builder plugins") + return plugin.NewPluginBuilder(cfg, opts) case len(opts.PreBuiltImages) > 0: logrus.Debugln("Using pre-built images") return build.NewPreBuiltImagesBuilder(opts.PreBuiltImages), nil case cfg.LocalBuild != nil: logrus.Debugln("Using builder: local") - return local.NewBuilder(cfg.LocalBuild, kubeContext) + return local.NewBuilder(cfg.LocalBuild, kubeContext, opts.SkipTests) case cfg.GoogleCloudBuild != nil: logrus.Debugln("Using builder: google cloud") - return gcb.NewBuilder(cfg.GoogleCloudBuild), nil + return gcb.NewBuilder(cfg.GoogleCloudBuild, opts.SkipTests), nil case cfg.KanikoBuild != nil: logrus.Debugln("Using builder: kaniko") @@ -136,11 +151,20 @@ func getBuilder(cfg *latest.BuildConfig, kubeContext string, opts *config.Skaffo } } -func getTester(cfg *latest.TestConfig, opts *config.SkaffoldOptions) (test.Tester, error) { +func buildWithPlugin(artifacts []*latest.Artifact) bool { + for _, a := range artifacts { + if a.BuilderPlugin != nil { + return true + } + } + return false +} + +func getTester(cfg []*latest.TestCase, opts *config.SkaffoldOptions) (test.Tester, error) { switch { case len(opts.PreBuiltImages) > 0: logrus.Debugln("Skipping tests") - return test.NewTester(&latest.TestConfig{}) + return test.NewTester(nil) default: return test.NewTester(cfg) } @@ -198,7 +222,7 @@ func (r *SkaffoldRunner) newLogger(out io.Writer, artifacts []*latest.Artifact) imageNames = append(imageNames, artifact.ImageName) } - return kubernetes.NewLogAggregator(out, imageNames, r.imageList) + return kubernetes.NewLogAggregator(out, imageNames, r.imageList, r.namespaces) } // HasDeployed 
returns true if this runner has deployed something. @@ -220,7 +244,7 @@ func (r *SkaffoldRunner) buildTestDeploy(ctx context.Context, out io.Writer, art // Make sure all artifacts are redeployed. Not only those that were just built. r.builds = mergeWithPreviousBuilds(bRes, r.builds) - if _, err := r.Deploy(ctx, out, r.builds); err != nil { + if err := r.Deploy(ctx, out, r.builds); err != nil { return errors.Wrap(err, "deploy failed") } @@ -244,9 +268,34 @@ func (r *SkaffoldRunner) Run(ctx context.Context, out io.Writer, artifacts []*la return nil } +// imageTags generates tags for a list of artifacts +func (r *SkaffoldRunner) imageTags(out io.Writer, artifacts []*latest.Artifact) (tag.ImageTags, error) { + tags := make(tag.ImageTags, len(artifacts)) + + for _, artifact := range artifacts { + imageName := artifact.ImageName + color.Default.Fprintf(out, "Generating Tag for [%s]...\n", imageName) + + tag, err := r.Tagger.GenerateFullyQualifiedImageName(artifact.Workspace, imageName) + if err != nil { + return nil, errors.Wrapf(err, "generating tag for %s", imageName) + } + + logrus.Debugf("Tag for %s: %s\n", imageName, tag) + tags[imageName] = tag + } + + return tags, nil +} + // BuildAndTest builds artifacts and runs tests on built artifacts func (r *SkaffoldRunner) BuildAndTest(ctx context.Context, out io.Writer, artifacts []*latest.Artifact) ([]build.Artifact, error) { - bRes, err := r.Build(ctx, out, r.Tagger, artifacts) + tags, err := r.imageTags(out, artifacts) + if err != nil { + return nil, errors.Wrap(err, "generating tag") + } + + bRes, err := r.Build(ctx, out, tags, artifacts) if err != nil { return nil, errors.Wrap(err, "build failed") } @@ -260,10 +309,10 @@ func (r *SkaffoldRunner) BuildAndTest(ctx context.Context, out io.Writer, artifa } // Deploy deploys the given artifacts -func (r *SkaffoldRunner) Deploy(ctx context.Context, out io.Writer, artifacts []build.Artifact) ([]deploy.Artifact, error) { - dRes, err := r.Deployer.Deploy(ctx, out, artifacts) 
+func (r *SkaffoldRunner) Deploy(ctx context.Context, out io.Writer, artifacts []build.Artifact) error { + err := r.Deployer.Deploy(ctx, out, artifacts, r.labellers) r.hasDeployed = true - return dRes, err + return err } // TailLogs prints the logs for deployed artifacts. @@ -302,3 +351,31 @@ func mergeWithPreviousBuilds(builds, previous []build.Artifact) []build.Artifact return merged } + +func getAllPodNamespaces(configNamespace string) ([]string, error) { + // We also get the default namespace. + nsMap := make(map[string]bool) + if configNamespace == "" { + config, err := kubectx.CurrentConfig() + if err != nil { + return nil, errors.Wrap(err, "getting k8s configuration") + } + context, ok := config.Contexts[config.CurrentContext] + if ok { + nsMap[context.Namespace] = true + } else { + nsMap[""] = true + } + } else { + nsMap[configNamespace] = true + } + + // FIXME: Set additional namespaces from the selected yamls. + + // Collate the slice of namespaces. + namespaces := make([]string, 0, len(nsMap)) + for ns := range nsMap { + namespaces = append(namespaces, ns) + } + return namespaces, nil +} diff --git a/pkg/skaffold/runner/runner_test.go b/pkg/skaffold/runner/runner_test.go index 2e37bea9ce0..fbeea953942 100644 --- a/pkg/skaffold/runner/runner_test.go +++ b/pkg/skaffold/runner/runner_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -60,13 +60,16 @@ func (t *TestBench) Labels() map[string]string { return m func (t *TestBench) TestDependencies() ([]string, error) { return nil, nil } func (t *TestBench) Dependencies() ([]string, error) { return nil, nil } func (t *TestBench) Cleanup(ctx context.Context, out io.Writer) error { return nil } +func (t *TestBench) DependenciesForArtifact(ctx context.Context, artifact *latest.Artifact) ([]string, error) { + return nil, nil +} func (t *TestBench) enterNewCycle() { t.actions = append(t.actions, t.currentActions) t.currentActions = Actions{} } -func (t *TestBench) Build(ctx context.Context, w io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]build.Artifact, error) { +func (t *TestBench) Build(ctx context.Context, w io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { if len(t.buildErrors) > 0 { err := t.buildErrors[0] t.buildErrors = t.buildErrors[1:] @@ -85,7 +88,7 @@ func (t *TestBench) Build(ctx context.Context, w io.Writer, tagger tag.Tagger, a }) } - t.currentActions.Built = tags(builds) + t.currentActions.Built = findTags(builds) return builds, nil } @@ -111,28 +114,28 @@ func (t *TestBench) Test(ctx context.Context, out io.Writer, artifacts []build.A } } - t.currentActions.Tested = tags(artifacts) + t.currentActions.Tested = findTags(artifacts) return nil } -func (t *TestBench) Deploy(ctx context.Context, out io.Writer, artifacts []build.Artifact) ([]deploy.Artifact, error) { +func (t *TestBench) Deploy(ctx context.Context, out io.Writer, artifacts []build.Artifact, labellers []deploy.Labeller) error { if len(t.deployErrors) > 0 { err := t.deployErrors[0] t.deployErrors = t.deployErrors[1:] if err != nil { - return nil, err + return err } } - t.currentActions.Deployed = tags(artifacts) - return nil, nil + t.currentActions.Deployed = findTags(artifacts) + return nil } func (t *TestBench) Actions() []Actions { return append(t.actions, t.currentActions) } -func tags(artifacts []build.Artifact) 
[]string { +func findTags(artifacts []build.Artifact) []string { var tags []string for _, artifact := range artifacts { tags = append(tags, artifact.Tag) diff --git a/pkg/skaffold/runner/timings.go b/pkg/skaffold/runner/timings.go index a4b880dee8c..601de36be07 100644 --- a/pkg/skaffold/runner/timings.go +++ b/pkg/skaffold/runner/timings.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -52,11 +52,11 @@ func (w withTimings) Labels() map[string]string { return labels.Merge(w.Builder.Labels(), w.Deployer.Labels()) } -func (w withTimings) Build(ctx context.Context, out io.Writer, tagger tag.Tagger, artifacts []*latest.Artifact) ([]build.Artifact, error) { +func (w withTimings) Build(ctx context.Context, out io.Writer, tags tag.ImageTags, artifacts []*latest.Artifact) ([]build.Artifact, error) { start := time.Now() color.Default.Fprintln(out, "Starting build...") - bRes, err := w.Builder.Build(ctx, out, tagger, artifacts) + bRes, err := w.Builder.Build(ctx, out, tags, artifacts) if err != nil { return nil, err } @@ -78,17 +78,16 @@ func (w withTimings) Test(ctx context.Context, out io.Writer, builds []build.Art return nil } -func (w withTimings) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact) ([]deploy.Artifact, error) { +func (w withTimings) Deploy(ctx context.Context, out io.Writer, builds []build.Artifact, labellers []deploy.Labeller) error { start := time.Now() color.Default.Fprintln(out, "Starting deploy...") - dRes, err := w.Deployer.Deploy(ctx, out, builds) - if err != nil { - return nil, err + if err := w.Deployer.Deploy(ctx, out, builds, labellers); err != nil { + return err } color.Default.Fprintln(out, "Deploy complete in", time.Since(start)) - return dRes, nil + return nil } func (w withTimings) Cleanup(ctx context.Context, out io.Writer) error { diff --git 
a/pkg/skaffold/schema/defaults/defaults.go b/pkg/skaffold/schema/defaults/defaults.go index 5eb32adc34c..3aa780c274b 100644 --- a/pkg/skaffold/schema/defaults/defaults.go +++ b/pkg/skaffold/schema/defaults/defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,34 +19,49 @@ package defaults import ( "fmt" - homedir "github.com/mitchellh/go-homedir" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/constants" kubectx "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + homedir "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Set makes sure default values are set on a SkaffoldPipeline. func Set(c *latest.SkaffoldPipeline) error { + if pluginsDefined(c) { + defaultToLocalExecEnvironment(c) + defaultToEmptyProperties(c) + } defaultToLocalBuild(c) defaultToKubectlDeploy(c) - setDefaultCloudBuildDockerImage(c) setDefaultTagger(c) setDefaultKustomizePath(c) setDefaultKubectlManifests(c) + if err := withCloudBuildConfig(c, + SetDefaultCloudBuildDockerImage, + setDefaultCloudBuildMavenImage, + setDefaultCloudBuildGradleImage, + ); err != nil { + return err + } + if err := withKanikoConfig(c, setDefaultKanikoTimeout, setDefaultKanikoImage, setDefaultKanikoNamespace, setDefaultKanikoSecret, setDefaultKanikoBuildContext, + setDefaultDockerConfigSecret, ); err != nil { return err } + if pluginsDefined(c) { + return nil + } + // Only set defaults on artifacts if not using plugin builders for _, a := range c.Build.Artifacts { defaultToDockerArtifact(a) setDefaultDockerfile(a) @@ -56,6 +71,29 @@ func Set(c *latest.SkaffoldPipeline) error { return nil } +func defaultToLocalExecEnvironment(c 
*latest.SkaffoldPipeline) { + if c.Build.ExecutionEnvironment == nil { + c.Build.ExecutionEnvironment = &latest.ExecutionEnvironment{ + Name: constants.Local, + } + } +} + +func defaultToEmptyProperties(c *latest.SkaffoldPipeline) { + if c.Build.ExecutionEnvironment.Properties == nil { + c.Build.ExecutionEnvironment.Properties = map[string]interface{}{} + } +} + +func pluginsDefined(c *latest.SkaffoldPipeline) bool { + for _, a := range c.Build.Artifacts { + if a.BuilderPlugin != nil { + return true + } + } + return false +} + func defaultToLocalBuild(c *latest.SkaffoldPipeline) { if c.Build.BuildType != (latest.BuildType{}) { return @@ -74,13 +112,32 @@ func defaultToKubectlDeploy(c *latest.SkaffoldPipeline) { c.Deploy.DeployType.KubectlDeploy = &latest.KubectlDeploy{} } -func setDefaultCloudBuildDockerImage(c *latest.SkaffoldPipeline) { - cloudBuild := c.Build.BuildType.GoogleCloudBuild - if cloudBuild == nil { - return +func withCloudBuildConfig(c *latest.SkaffoldPipeline, operations ...func(kaniko *latest.GoogleCloudBuild) error) error { + if gcb := c.Build.GoogleCloudBuild; gcb != nil { + for _, operation := range operations { + if err := operation(gcb); err != nil { + return err + } + } } - cloudBuild.DockerImage = valueOrDefault(cloudBuild.DockerImage, constants.DefaultCloudBuildDockerImage) + return nil +} + +// SetDefaultCloudBuildDockerImage sets the default cloud build image if it doesn't exist +func SetDefaultCloudBuildDockerImage(gcb *latest.GoogleCloudBuild) error { + gcb.DockerImage = valueOrDefault(gcb.DockerImage, constants.DefaultCloudBuildDockerImage) + return nil +} + +func setDefaultCloudBuildMavenImage(gcb *latest.GoogleCloudBuild) error { + gcb.MavenImage = valueOrDefault(gcb.MavenImage, constants.DefaultCloudBuildMavenImage) + return nil +} + +func setDefaultCloudBuildGradleImage(gcb *latest.GoogleCloudBuild) error { + gcb.GradleImage = valueOrDefault(gcb.GradleImage, constants.DefaultCloudBuildGradleImage) + return nil } func 
setDefaultTagger(c *latest.SkaffoldPipeline) { @@ -116,10 +173,15 @@ func defaultToDockerArtifact(a *latest.Artifact) { func setDefaultDockerfile(a *latest.Artifact) { if a.DockerArtifact != nil { - a.DockerArtifact.DockerfilePath = valueOrDefault(a.DockerArtifact.DockerfilePath, constants.DefaultDockerfilePath) + SetDefaultDockerArtifact(a.DockerArtifact) } } +// SetDefaultDockerArtifact sets defaults on docker artifacts +func SetDefaultDockerArtifact(a *latest.DockerArtifact) { + a.DockerfilePath = valueOrDefault(a.DockerfilePath, constants.DefaultDockerfilePath) +} + func setDefaultWorkspace(a *latest.Artifact) { a.Workspace = valueOrDefault(a.Workspace, ".") } @@ -175,6 +237,26 @@ func setDefaultKanikoSecret(kaniko *latest.KanikoBuild) error { return nil } +func setDefaultDockerConfigSecret(kaniko *latest.KanikoBuild) error { + if kaniko.DockerConfig == nil { + return nil + } + + kaniko.DockerConfig.SecretName = valueOrDefault(kaniko.DockerConfig.SecretName, constants.DefaultKanikoDockerConfigSecretName) + + if kaniko.DockerConfig.Path != "" { + absPath, err := homedir.Expand(kaniko.DockerConfig.Path) + if err != nil { + return fmt.Errorf("unable to expand dockerConfig.path %s", kaniko.DockerConfig.Path) + } + + kaniko.DockerConfig.Path = absPath + return nil + } + + return nil +} + func setDefaultKanikoBuildContext(kaniko *latest.KanikoBuild) error { if kaniko.BuildContext == nil { kaniko.BuildContext = &latest.KanikoBuildContext{ diff --git a/pkg/skaffold/schema/defaults/defaults_test.go b/pkg/skaffold/schema/defaults/defaults_test.go index 86935e30d47..354bce75484 100644 --- a/pkg/skaffold/schema/defaults/defaults_test.go +++ b/pkg/skaffold/schema/defaults/defaults_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/latest/config.go b/pkg/skaffold/schema/latest/config.go index 221e33da349..05aafba0d6b 100644 --- a/pkg/skaffold/schema/latest/config.go +++ b/pkg/skaffold/schema/latest/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,9 +18,10 @@ package latest import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + yamlpatch "github.com/krishicks/yaml-patch" ) -const Version string = "skaffold/v1beta2" +const Version string = "skaffold/v1beta5" // NewSkaffoldPipeline creates a SkaffoldPipeline func NewSkaffoldPipeline() util.VersionedConfig { @@ -28,116 +29,269 @@ func NewSkaffoldPipeline() util.VersionedConfig { } type SkaffoldPipeline struct { + // APIVersion is the version of the configuration. APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - Build BuildConfig `yaml:"build,omitempty"` - Test TestConfig `yaml:"test,omitempty"` - Deploy DeployConfig `yaml:"deploy,omitempty"` - Profiles []Profile `yaml:"profiles,omitempty"` + // Kind is always `Config`. Defaults to `Config`. + Kind string `yaml:"kind"` + + // Build describes how images are built. + Build BuildConfig `yaml:"build,omitempty"` + + // Test describes how images are tested. + Test []*TestCase `yaml:"test,omitempty"` + + // Deploy describes how images are deployed. + Deploy DeployConfig `yaml:"deploy,omitempty"` + + // Profiles (beta) can be used to override `build`, `test` or `deploy` configuration. + Profiles []Profile `yaml:"profiles,omitempty"` } func (c *SkaffoldPipeline) GetVersion() string { return c.APIVersion } -// BuildConfig contains all the configuration for the build steps +// BuildConfig contains all the configuration for the build steps. type BuildConfig struct { + // Artifacts lists the images you're going to be building. 
Artifacts []*Artifact `yaml:"artifacts,omitempty"` - TagPolicy TagPolicy `yaml:"tagPolicy,omitempty"` + + // TagPolicy (beta) determines how images are tagged. + // A few strategies are provided here, although you most likely won't need to care! + // If not specified, it defaults to `gitCommit: {}`. + TagPolicy TagPolicy `yaml:"tagPolicy,omitempty"` + + // ExecutionEnvironment is the environment in which the build + // should run. Possible values: googleCloudBuild. + ExecutionEnvironment *ExecutionEnvironment `yaml:"executionEnvironment,omitempty"` + BuildType `yaml:",inline"` } -// TagPolicy contains all the configuration for the tagging step +type ExecEnvironment string + +// ExecutionEnvironment is the environment in which the build should run (ex. local or in-cluster, etc.). +type ExecutionEnvironment struct { + // Name is the name of the environment. + Name ExecEnvironment `yaml:"name,omitempty"` + + // Properties are key-value pairs passed to the environment. + Properties map[string]interface{} `yaml:"properties,omitempty"` +} + +// BuilderPlugin contains all fields necessary for specifying a build plugin. +type BuilderPlugin struct { + // Name is the name of the build plugin. + Name string `yaml:"name,omitempty"` + + // Properties are key-value pairs passed to the plugin. + Properties map[string]interface{} `yaml:"properties,omitempty"` + + // Contents + Contents []byte `yaml:",omitempty"` +} + +// TagPolicy contains all the configuration for the tagging step. type TagPolicy struct { - GitTagger *GitTagger `yaml:"gitCommit,omitempty" yamltags:"oneOf=tag"` - ShaTagger *ShaTagger `yaml:"sha256,omitempty" yamltags:"oneOf=tag"` + // GitTagger (beta) tags images with the git tag or commit of the artifact's workspace. + GitTagger *GitTagger `yaml:"gitCommit,omitempty" yamltags:"oneOf=tag"` + + // ShaTagger (beta) tags images with their sha256 digest. 
+ ShaTagger *ShaTagger `yaml:"sha256,omitempty" yamltags:"oneOf=tag"` + + // EnvTemplateTagger (beta) tags images with a configurable template string. EnvTemplateTagger *EnvTemplateTagger `yaml:"envTemplate,omitempty" yamltags:"oneOf=tag"` - DateTimeTagger *DateTimeTagger `yaml:"dateTime,omitempty" yamltags:"oneOf=tag"` + + // DateTimeTagger (beta) tags images with the build timestamp. + DateTimeTagger *DateTimeTagger `yaml:"dateTime,omitempty" yamltags:"oneOf=tag"` } -// ShaTagger contains the configuration for the SHA tagger. +// ShaTagger (beta) tags images with their sha256 digest. type ShaTagger struct{} -// GitTagger contains the configuration for the git tagger. +// GitTagger (beta) tags images with the git tag or commit of the artifact's workspace. type GitTagger struct{} -// EnvTemplateTagger contains the configuration for the envTemplate tagger. +// EnvTemplateTagger (beta) tags images with a configurable template string. type EnvTemplateTagger struct { - Template string `yaml:"template,omitempty"` + // Template used to produce the image name and tag. + // See golang [text/template](https://golang.org/pkg/text/template/). + // The template is executed against the current environment, + // with those variables injected: + // IMAGE_NAME | Name of the image being built, as supplied in the artifacts section. + // For example: `{{.RELEASE}}-{{.IMAGE_NAME}}`. + Template string `yaml:"template,omitempty" yamltags:"required"` } -// DateTimeTagger contains the configuration for the DateTime tagger. +// DateTimeTagger (beta) tags images with the build timestamp. type DateTimeTagger struct { - Format string `yaml:"format,omitempty"` + // Format formats the date and time. + // See [#Time.Format](https://golang.org/pkg/time/#Time.Format). + // Defaults to `2006-01-02_15-04-05.999_MST`. + Format string `yaml:"format,omitempty"` + + // TimeZone sets the timezone for the date and time. + // See [Time.LoadLocation](https://golang.org/pkg/time/#Time.LoadLocation). 
+ // Defaults to the local timezone. TimeZone string `yaml:"timezone,omitempty"` } // BuildType contains the specific implementation and parameters needed // for the build step. Only one field should be populated. type BuildType struct { - LocalBuild *LocalBuild `yaml:"local,omitempty" yamltags:"oneOf=build"` + // LocalBuild (beta) describes how to do a build on the local docker daemon + // and optionally push to a repository. + LocalBuild *LocalBuild `yaml:"local,omitempty" yamltags:"oneOf=build"` + + // GoogleCloudBuild (beta) describes how to do a remote build on + // [Google Cloud Build](https://cloud.google.com/cloud-build/). GoogleCloudBuild *GoogleCloudBuild `yaml:"googleCloudBuild,omitempty" yamltags:"oneOf=build"` - KanikoBuild *KanikoBuild `yaml:"kaniko,omitempty" yamltags:"oneOf=build"` + + // KanikoBuild (beta) describes how to do an on-cluster build using + // [Kaniko](https://github.com/GoogleContainerTools/kaniko). + KanikoBuild *KanikoBuild `yaml:"kaniko,omitempty" yamltags:"oneOf=build"` } -// LocalBuild contains the fields needed to do a build on the local docker daemon +// LocalBuild (beta) describes how to do a build on the local docker daemon // and optionally push to a repository. type LocalBuild struct { - Push *bool `yaml:"push,omitempty"` - UseDockerCLI bool `yaml:"useDockerCLI,omitempty"` - UseBuildkit bool `yaml:"useBuildkit,omitempty"` + // Push should images be pushed to a registry. + // If not specified, images are pushed only if the current Kubernetes context + // connects to a remote cluster. + Push *bool `yaml:"push,omitempty"` + + // UseDockerCLI use `docker` command-line interface instead of Docker Engine APIs. + UseDockerCLI bool `yaml:"useDockerCLI,omitempty"` + + // UseBuildkit use BuildKit to build Docker images. + UseBuildkit bool `yaml:"useBuildkit,omitempty"` } -// GoogleCloudBuild contains the fields needed to do a remote build on -// Google Cloud Build. 
+// GoogleCloudBuild (beta) describes how to do a remote build on +// [Google Cloud Build](https://cloud.google.com/cloud-build/docs/). +// Docker and Jib artifacts can be built on Cloud Build. The `projectId` needs +// to be provided and the currently logged in user should be given permissions to trigger +// new builds. type GoogleCloudBuild struct { - ProjectID string `yaml:"projectId,omitempty"` - DiskSizeGb int64 `yaml:"diskSizeGb,omitempty"` + // ProjectID is the ID of your Cloud Platform Project. + // If it is not provided, Skaffold will guess it from the image name. + // For example, given the artifact image name `gcr.io/myproject/image`, Skaffold + // will use the `myproject` GCP project. + ProjectID string `yaml:"projectId,omitempty"` + + // DiskSizeGb is the disk size of the VM that runs the build. + // See [Cloud Build Reference](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildoptions). + DiskSizeGb int64 `yaml:"diskSizeGb,omitempty"` + + // MachineType is the type of the VM that runs the build. + // See [Cloud Build Reference](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildoptions). MachineType string `yaml:"machineType,omitempty"` - Timeout string `yaml:"timeout,omitempty"` + + // Timeout is the amount of time (in seconds) that this build should be allowed to run. + // See [Cloud Build Reference](https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#resource-build). + Timeout string `yaml:"timeout,omitempty"` + + // DockerImage is the image that runs a Docker build. + // See [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders). + // Defaults to `gcr.io/cloud-builders/docker`. DockerImage string `yaml:"dockerImage,omitempty"` -} -// LocalDir represents the local directory kaniko build context -type LocalDir struct { + // MavenImage is the image that runs a Maven build. 
+ // See [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders). + // Defaults to `gcr.io/cloud-builders/mvn`. + MavenImage string `yaml:"mavenImage,omitempty"` + + // GradleImage is the image that runs a Gradle build. + // See [Cloud Builders](https://cloud.google.com/cloud-build/docs/cloud-builders). + // Defaults to `gcr.io/cloud-builders/gradle`. + GradleImage string `yaml:"gradleImage,omitempty"` } +// LocalDir configures how Kaniko mounts sources directly via an `emptyDir` volume. +type LocalDir struct{} + // KanikoBuildContext contains the different fields available to specify -// a kaniko build context +// a Kaniko build context. type KanikoBuildContext struct { - GCSBucket string `yaml:"gcsBucket,omitempty" yamltags:"oneOf=buildContext"` - LocalDir *LocalDir `yaml:"localDir,omitempty" yamltags:"oneOf=buildContext"` + // GCSBucket is the GCS bucket to which sources are uploaded by Skaffold. + // Kaniko will need access to that bucket to download the sources. + GCSBucket string `yaml:"gcsBucket,omitempty" yamltags:"oneOf=buildContext"` + + // LocalDir configures how Kaniko mounts sources directly via an `emptyDir` volume. + LocalDir *LocalDir `yaml:"localDir,omitempty" yamltags:"oneOf=buildContext"` } -// KanikoCache contains fields related to kaniko caching +// KanikoCache configures Kaniko caching. If a cache is specified, Kaniko will +// use a remote cache which will speed up builds. type KanikoCache struct { + // Repo is a remote repository to store cached layers. If none is specified, one will be + // inferred from the image name. See [Kaniko Caching](https://github.com/GoogleContainerTools/kaniko#caching). Repo string `yaml:"repo,omitempty"` } -// KanikoBuild contains the fields needed to do a on-cluster build using -// the kaniko image +// KanikoBuild (beta) describes how to do an on-cluster build using +// [Kaniko](https://github.com/GoogleContainerTools/kaniko). 
type KanikoBuild struct { - BuildContext *KanikoBuildContext `yaml:"buildContext,omitempty"` - Cache *KanikoCache `yaml:"cache,omitempty"` - AdditionalFlags []string `yaml:"flags,omitempty"` - PullSecret string `yaml:"pullSecret,omitempty"` - PullSecretName string `yaml:"pullSecretName,omitempty"` - Namespace string `yaml:"namespace,omitempty"` - Timeout string `yaml:"timeout,omitempty"` - Image string `yaml:"image,omitempty"` + // BuildContext defines where Kaniko gets the sources from. + BuildContext *KanikoBuildContext `yaml:"buildContext,omitempty"` + + // Cache configures Kaniko caching. If a cache is specified, Kaniko will + // use a remote cache which will speed up builds. + Cache *KanikoCache `yaml:"cache,omitempty"` + + // AdditionalFlags are additional flags to be passed to Kaniko command line. + // See [Kaniko Additional Flags](https://github.com/GoogleContainerTools/kaniko#additional-flags). + AdditionalFlags []string `yaml:"flags,omitempty"` + + // PullSecret is the path to the secret key file. + // See [Kaniko Documentation](https://github.com/GoogleContainerTools/kaniko#running-kaniko-in-a-kubernetes-cluster). + PullSecret string `yaml:"pullSecret,omitempty"` + + // PullSecretName is the name of the Kubernetes secret for pulling the files + // from the build context and pushing the final image. + // Defaults to `kaniko-secret`. + PullSecretName string `yaml:"pullSecretName,omitempty"` + + // Namespace is the Kubernetes namespace. + // Defaults to current namespace in Kubernetes configuration. + Namespace string `yaml:"namespace,omitempty"` + + // Timeout is the amount of time (in seconds) that this build is allowed to run. + // Defaults to 20 minutes (`20m`). + Timeout string `yaml:"timeout,omitempty"` + + // Image is the Docker image used by the Kaniko pod. + // Defaults to the latest released version of `gcr.io/kaniko-project/executor`. 
+ Image string `yaml:"image,omitempty"` + + // DockerConfig describes how to mount the local Docker configuration into the + // Kaniko pod. + DockerConfig *DockerConfig `yaml:"dockerConfig,omitempty"` } -type TestConfig []*TestCase +// DockerConfig contains information about the docker `config.json` to mount. +type DockerConfig struct { + // Path is the path to the docker `config.json`. + Path string `yaml:"path,omitempty"` + + // SecretName is the Kubernetes secret that will hold the Docker configuration. + SecretName string `yaml:"secretName,omitempty"` +} -// TestCase is a struct containing all the specified test -// configuration for an image. +// TestCase is a list of structure tests to run on images that Skaffold builds. type TestCase struct { - ImageName string `yaml:"image"` + // ImageName is the artifact on which to run those tests. + // For example: `gcr.io/k8s-skaffold/example`. + ImageName string `yaml:"image" yamltags:"required"` + + // StructureTests lists the [Container Structure Tests](https://github.com/GoogleContainerTools/container-structure-test) + // to run on that artifact. + // For example: `["./test/*"]`. StructureTests []string `yaml:"structureTests,omitempty"` } -// DeployConfig contains all the configuration needed by the deploy steps +// DeployConfig contains all the configuration needed by the deploy steps. type DeployConfig struct { DeployType `yaml:",inline"` } @@ -145,129 +299,293 @@ type DeployConfig struct { // DeployType contains the specific implementation and parameters needed // for the deploy step. Only one field should be populated. type DeployType struct { - HelmDeploy *HelmDeploy `yaml:"helm,omitempty" yamltags:"oneOf=deploy"` - KubectlDeploy *KubectlDeploy `yaml:"kubectl,omitempty" yamltags:"oneOf=deploy"` + // HelmDeploy (beta) uses the `helm` CLI to apply the charts to the cluster. 
+ HelmDeploy *HelmDeploy `yaml:"helm,omitempty" yamltags:"oneOf=deploy"` + + // KubectlDeploy (beta) uses a client side `kubectl apply` to deploy manifests. + // You'll need a `kubectl` CLI version installed that's compatible with your cluster. + KubectlDeploy *KubectlDeploy `yaml:"kubectl,omitempty" yamltags:"oneOf=deploy"` + + // KustomizeDeploy (beta) uses the `kustomize` CLI to "patch" a deployment for a target environment. KustomizeDeploy *KustomizeDeploy `yaml:"kustomize,omitempty" yamltags:"oneOf=deploy"` } -// KubectlDeploy contains the configuration needed for deploying with `kubectl apply` +// KubectlDeploy (beta) uses a client side `kubectl apply` to deploy manifests. +// You'll need a `kubectl` CLI version installed that's compatible with your cluster. type KubectlDeploy struct { - Manifests []string `yaml:"manifests,omitempty"` - RemoteManifests []string `yaml:"remoteManifests,omitempty"` - Flags KubectlFlags `yaml:"flags,omitempty"` + // Manifests lists the Kubernetes yaml or json manifests. + // Defaults to `["k8s/*.yaml"]`. + Manifests []string `yaml:"manifests,omitempty"` + + // RemoteManifests lists Kubernetes manifests in remote clusters. + RemoteManifests []string `yaml:"remoteManifests,omitempty"` + + // Flags are additional flags passed to `kubectl`. + Flags KubectlFlags `yaml:"flags,omitempty"` } -// KubectlFlags describes additional options flags that are passed on the command +// KubectlFlags are additional flags passed on the command // line to kubectl either on every command (Global), on creations (Apply) // or deletions (Delete). type KubectlFlags struct { + // Global are additional flags passed on every command. Global []string `yaml:"global,omitempty"` - Apply []string `yaml:"apply,omitempty"` + + // Apply are additional flags passed on creations (`kubectl apply`). + Apply []string `yaml:"apply,omitempty"` + + // Delete are additional flags passed on deletions (`kubectl delete`). 
Delete []string `yaml:"delete,omitempty"` } -// HelmDeploy contains the configuration needed for deploying with helm +// HelmDeploy (beta) uses the `helm` CLI to apply the charts to the cluster. type HelmDeploy struct { - Releases []HelmRelease `yaml:"releases,omitempty"` + // Releases is a list of Helm releases. + Releases []HelmRelease `yaml:"releases,omitempty" yamltags:"required"` } -// KustomizeDeploy contains the configuration needed for deploying with kustomize. +// KustomizeDeploy (beta) uses the `kustomize` CLI to "patch" a deployment for a target environment. type KustomizeDeploy struct { - KustomizePath string `yaml:"path,omitempty"` - Flags KubectlFlags `yaml:"flags,omitempty"` + // KustomizePath is the path to Kustomization files. + // Defaults to `.`. + KustomizePath string `yaml:"path,omitempty"` + + // Flags are additional flags passed to `kubectl`. + Flags KubectlFlags `yaml:"flags,omitempty"` } type HelmRelease struct { - Name string `yaml:"name,omitempty"` - ChartPath string `yaml:"chartPath,omitempty"` - ValuesFiles []string `yaml:"valuesFiles,omitempty"` - Values map[string]string `yaml:"values,omitempty,omitempty"` - Namespace string `yaml:"namespace,omitempty"` - Version string `yaml:"version,omitempty"` - SetValues map[string]string `yaml:"setValues,omitempty"` - SetValueTemplates map[string]string `yaml:"setValueTemplates,omitempty"` - Wait bool `yaml:"wait,omitempty"` - SkipDependencyBuild bool `yaml:"skipDependencyBuild,omitempty"` - RecreatePods bool `yaml:"recreatePods,omitempty"` - Overrides map[string]interface{} `yaml:"overrides,omitempty"` - Packaged *HelmPackaged `yaml:"packaged,omitempty"` - ImageStrategy HelmImageStrategy `yaml:"imageStrategy,omitempty"` -} - -// HelmPackaged represents parameters for packaging helm chart. + // Name is the name of the Helm release. + Name string `yaml:"name,omitempty" yamltags:"required"` + + // ChartPath is the path to the Helm chart. 
+ ChartPath string `yaml:"chartPath,omitempty" yamltags:"required"` + + // ValuesFiles are the paths to the Helm `values` files. + ValuesFiles []string `yaml:"valuesFiles,omitempty"` + + // Values are key-value pairs supplementing the Helm `values` file. + Values map[string]string `yaml:"values,omitempty,omitempty"` + + // Namespace is the Kubernetes namespace. + Namespace string `yaml:"namespace,omitempty"` + + // Version is the version of the chart. + Version string `yaml:"version,omitempty"` + + // SetValues are key-value pairs. + // If present, Skaffold will send `--set` flag to Helm CLI and append all pairs after the flag. + SetValues map[string]string `yaml:"setValues,omitempty"` + + // SetValueTemplates are key-value pairs. + // If present, Skaffold will try to parse the value part of each key-value pair using + // environment variables in the system, then send `--set` flag to Helm CLI and append + // all parsed pairs after the flag. + SetValueTemplates map[string]string `yaml:"setValueTemplates,omitempty"` + + // Wait if `true`, Skaffold will send `--wait` flag to Helm CLI. + // Defaults to `false`. + Wait bool `yaml:"wait,omitempty"` + + // RecreatePods if `true`, Skaffold will send `--recreate-pods` flag to Helm CLI. + // Defaults to `false`. + RecreatePods bool `yaml:"recreatePods,omitempty"` + + // SkipBuildDependencies should build dependencies be skipped. + SkipBuildDependencies bool `yaml:"skipBuildDependencies,omitempty"` + + // Overrides are key-value pairs. + // If present, Skaffold will build a Helm `values` file that overrides + // the original and use it to call Helm CLI (`-f` flag). + Overrides map[string]interface{} `yaml:"overrides,omitempty"` + + // Packaged parameters for packaging helm chart (`helm package`). + Packaged *HelmPackaged `yaml:"packaged,omitempty"` + + // ImageStrategy adds image configurations to the Helm `values` file. 
+ ImageStrategy HelmImageStrategy `yaml:"imageStrategy,omitempty"` +} + +// HelmPackaged parameters for packaging helm chart (`helm package`). type HelmPackaged struct { - // Version sets the version on the chart to this semver version. + // Version sets the `version` on the chart to this semver version. Version string `yaml:"version,omitempty"` - // AppVersion set the appVersion on the chart to this version + // AppVersion sets the `appVersion` on the chart to this version. AppVersion string `yaml:"appVersion,omitempty"` } +// HelmImageStrategy adds image configurations to the Helm `values` file. type HelmImageStrategy struct { HelmImageConfig `yaml:",inline"` } type HelmImageConfig struct { - HelmFQNConfig *HelmFQNConfig `yaml:"fqn,omitempty"` + // HelmFQNConfig is the image configuration that uses the syntax `IMAGE-NAME=IMAGE-REPOSITORY:IMAGE-TAG`. + HelmFQNConfig *HelmFQNConfig `yaml:"fqn,omitempty"` + + // HelmConventionConfig is the image configuration that uses the syntax `IMAGE-NAME.repository=IMAGE-REPOSITORY, IMAGE-NAME.tag=IMAGE-TAG`. HelmConventionConfig *HelmConventionConfig `yaml:"helm,omitempty"` } -// HelmFQNConfig represents image config to use the FullyQualifiedImageName as param to set +// HelmFQNConfig is the image config to use the FullyQualifiedImageName as param to set. type HelmFQNConfig struct { + // Property defines the image config. Property string `yaml:"property,omitempty"` } -// HelmConventionConfig represents image config in the syntax of image.repository and image.tag +// HelmConventionConfig is the image config in the syntax of image.repository and image.tag. type HelmConventionConfig struct { } -// Artifact represents items that need to be built, along with the context in which +// Artifact describes the items that need to be built, along with the context in which // they should be built. 
type Artifact struct { - ImageName string `yaml:"image,omitempty"` - Workspace string `yaml:"context,omitempty"` - Sync map[string]string `yaml:"sync,omitempty"` + // ImageName is the name of the image to be built. + // For example: `gcr.io/k8s-skaffold/example`. + ImageName string `yaml:"image,omitempty" yamltags:"required"` + + // Workspace is the directory where the artifact's sources are to be found. + // Defaults to `.`. + Workspace string `yaml:"context,omitempty"` + + // Sync (alpha) lists local files that can be synced to remote pods instead + // of triggering an image build when modified. + // This is a mapping of local files to sync to remote folders. + // For example: `{"*.py": ".", "css/**/*.css": "app/css"}`. + Sync map[string]string `yaml:"sync,omitempty"` + ArtifactType `yaml:",inline"` + + // BuilderPlugin is the plugin used to build this artifact. + BuilderPlugin *BuilderPlugin `yaml:"plugin,omitempty"` } -// Profile is additional configuration that overrides default -// configuration when it is activated. +// Profile (beta) profiles are used to override any `build`, `test` or `deploy` configuration. type Profile struct { - Name string `yaml:"name,omitempty"` - Build BuildConfig `yaml:"build,omitempty"` - Test TestConfig `yaml:"test,omitempty"` + // Name is a unique profile name. + // For example: `profile-prod`. + Name string `yaml:"name,omitempty" yamltags:"required"` + + // Build replaces the main `build` configuration. + Build BuildConfig `yaml:"build,omitempty"` + + // Test replaces the main `test` configuration. + Test []*TestCase `yaml:"test,omitempty"` + + // Deploy replaces the main `deploy` configuration. Deploy DeployConfig `yaml:"deploy,omitempty"` + + // Patches is a list of patches applied to the configuration. + // Patches use the JSON patch notation. + Patches []JSONPatch `yaml:"patches,omitempty"` + + // Activation criteria by which a profile can be auto-activated. 
+ Activation []Activation `yaml:"activation,omitempty"` +} + +// JSONPatch patch to be applied by a profile. +type JSONPatch struct { + // Op is the operation carried by the patch: `add`, `remove`, `replace`, `move`, `copy` or `test`. + // Defaults to `replace`. + Op string `yaml:"op,omitempty"` + + // Path is the position in the yaml where the operation takes place. + // For example, this targets the `dockerfile` of the first artifact built. + // For example: `/build/artifacts/0/docker/dockerfile`. + Path string `yaml:"path,omitempty" yamltags:"required"` + + // From is the source position in the yaml, used for `copy` or `move` operations. + From string `yaml:"from,omitempty"` + + // Value is the value to apply. Can be any portion of yaml. + Value *yamlpatch.Node `yaml:"value,omitempty"` +} + +// Activation criteria by which a profile is auto-activated. +type Activation struct { + // Env is a key=value pair. The profile is auto-activated if an Environment + // Variable `key` has value `value`. + // For example: `ENV=production`. + Env string `yaml:"env,omitempty"` + + // KubeContext is a Kubernetes context for which the profile is auto-activated. + // For example: `minikube`. + KubeContext string `yaml:"kubeContext,omitempty"` + + // Command is a Skaffold command for which the profile is auto-activated. + // For example: `dev`. + Command string `yaml:"command,omitempty"` } type ArtifactType struct { - DockerArtifact *DockerArtifact `yaml:"docker,omitempty" yamltags:"oneOf=artifact"` - BazelArtifact *BazelArtifact `yaml:"bazel,omitempty" yamltags:"oneOf=artifact"` - JibMavenArtifact *JibMavenArtifact `yaml:"jibMaven,omitempty" yamltags:"oneOf=artifact"` + // DockerArtifact (beta) describes an artifact built from a Dockerfile. + DockerArtifact *DockerArtifact `yaml:"docker,omitempty" yamltags:"oneOf=artifact"` + + // BazelArtifact (beta) requires bazel CLI to be installed and the sources to + // contain [Bazel](https://bazel.build/) configuration files. 
+ BazelArtifact *BazelArtifact `yaml:"bazel,omitempty" yamltags:"oneOf=artifact"` + + // JibMavenArtifact (alpha) builds images using the + // [Jib plugin for Maven](https://github.com/GoogleContainerTools/jib/tree/master/jib-maven-plugin). + JibMavenArtifact *JibMavenArtifact `yaml:"jibMaven,omitempty" yamltags:"oneOf=artifact"` + + // JibGradleArtifact (alpha) builds images using the + // [Jib plugin for Gradle](https://github.com/GoogleContainerTools/jib/tree/master/jib-gradle-plugin). JibGradleArtifact *JibGradleArtifact `yaml:"jibGradle,omitempty" yamltags:"oneOf=artifact"` } -// DockerArtifact describes an artifact built from a Dockerfile, +// DockerArtifact (beta) describes an artifact built from a Dockerfile, // usually using `docker build`. type DockerArtifact struct { - DockerfilePath string `yaml:"dockerfile,omitempty"` - BuildArgs map[string]*string `yaml:"buildArgs,omitempty"` - CacheFrom []string `yaml:"cacheFrom,omitempty"` - Target string `yaml:"target,omitempty"` + // DockerfilePath locates the Dockerfile relative to workspace. + // Defaults to `Dockerfile`. + DockerfilePath string `yaml:"dockerfile,omitempty"` + + // Target is the Dockerfile target name to build. + Target string `yaml:"target,omitempty"` + + // BuildArgs are arguments passed to the docker build. + // For example: `{"key1": "value1", "key2": "value2"}`. + BuildArgs map[string]*string `yaml:"buildArgs,omitempty"` + + // CacheFrom lists the Docker images to consider as cache sources. + // For example: `["golang:1.10.1-alpine3.7", "alpine:3.7"]`. + CacheFrom []string `yaml:"cacheFrom,omitempty"` } -// BazelArtifact describes an artifact built with Bazel. +// BazelArtifact (beta) describes an artifact built with [Bazel](https://bazel.build/). type BazelArtifact struct { - BuildTarget string `yaml:"target,omitempty"` - BuildArgs []string `yaml:"args,omitempty"` + // BuildTarget is the `bazel build` target to run. + // For example: `//:skaffold_example.tar`. 
+ BuildTarget string `yaml:"target,omitempty" yamltags:"required"` + + // BuildArgs are additional args to pass to `bazel build`. + // For example: `["-flag", "--otherflag"]`. + BuildArgs []string `yaml:"args,omitempty"` } +// JibMavenArtifact (alpha) builds images using the +// [Jib plugin for Maven](https://github.com/GoogleContainerTools/jib/tree/master/jib-maven-plugin). type JibMavenArtifact struct { - // Only multi-module - Module string `yaml:"module"` + // Module selects which Maven module to build, for a multi module project. + Module string `yaml:"module"` + + // Profile selects which Maven profile to activate. Profile string `yaml:"profile"` + + // Flags are additional build flags passed to Maven. + // For example: `["-x", "-DskipTests"]`. + Flags []string `yaml:"args,omitempty"` } +// JibGradleArtifact (alpha) builds images using the +// [Jib plugin for Gradle](https://github.com/GoogleContainerTools/jib/tree/master/jib-gradle-plugin). type JibGradleArtifact struct { - // Only multi-module + // Project selects which Gradle project to build. Project string `yaml:"project"` + + // Flags are additional build flags passed to Gradle. + // For example: `["--no-build-cache"]`. + Flags []string `yaml:"args,omitempty"` } diff --git a/pkg/skaffold/schema/latest/upgrade.go b/pkg/skaffold/schema/latest/upgrade.go index e6ee9481f5f..a0515b6c6dd 100644 --- a/pkg/skaffold/schema/latest/upgrade.go +++ b/pkg/skaffold/schema/latest/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/profile_test.go b/pkg/skaffold/schema/profile_test.go deleted file mode 100644 index 2634d5a89da..00000000000 --- a/pkg/skaffold/schema/profile_test.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2018 The Skaffold Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schema - -import ( - "testing" - - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" - "github.com/GoogleContainerTools/skaffold/testutil" -) - -func TestApplyProfiles(t *testing.T) { - tests := []struct { - description string - config *latest.SkaffoldPipeline - profile string - expected *latest.SkaffoldPipeline - shouldErr bool - }{ - { - description: "unknown profile", - config: config(), - profile: "profile", - expected: config(), - shouldErr: true, - }, - { - description: "build type", - profile: "profile", - config: config( - withLocalBuild( - withGitTagger(), - withDockerArtifact("image", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - withProfiles(latest.Profile{ - Name: "profile", - Build: latest.BuildConfig{ - BuildType: latest.BuildType{ - GoogleCloudBuild: &latest.GoogleCloudBuild{ - ProjectID: "my-project", - }, - }, - }, - }), - ), - expected: config( - withGoogleCloudBuild("my-project", - withGitTagger(), - withDockerArtifact("image", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - ), - }, - { - description: "tag policy", - profile: "dev", - config: config( - withLocalBuild( - withGitTagger(), - 
withDockerArtifact("image", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - withProfiles(latest.Profile{ - Name: "dev", - Build: latest.BuildConfig{ - TagPolicy: latest.TagPolicy{ShaTagger: &latest.ShaTagger{}}, - }, - }), - ), - expected: config( - withLocalBuild( - withShaTagger(), - withDockerArtifact("image", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - ), - }, - { - description: "artifacts", - profile: "profile", - config: config( - withLocalBuild( - withGitTagger(), - withDockerArtifact("image", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - withProfiles(latest.Profile{ - Name: "profile", - Build: latest.BuildConfig{ - Artifacts: []*latest.Artifact{ - {ImageName: "image"}, - {ImageName: "imageProd"}, - }, - }, - }), - ), - expected: config( - withLocalBuild( - withGitTagger(), - withDockerArtifact("image", ".", "Dockerfile"), - withDockerArtifact("imageProd", ".", "Dockerfile"), - ), - withKubectlDeploy("k8s/*.yaml"), - ), - }, - { - description: "deploy", - profile: "profile", - config: config( - withLocalBuild( - withGitTagger(), - ), - withKubectlDeploy("k8s/*.yaml"), - withProfiles(latest.Profile{ - Name: "profile", - Deploy: latest.DeployConfig{ - DeployType: latest.DeployType{ - HelmDeploy: &latest.HelmDeploy{}, - }, - }, - }), - ), - expected: config( - withLocalBuild( - withGitTagger(), - ), - withHelmDeploy(), - ), - }, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - err := ApplyProfiles(test.config, []string{test.profile}) - - testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, test.config) - }) - } -} diff --git a/pkg/skaffold/schema/profiles.go b/pkg/skaffold/schema/profiles.go index 1920fb4c647..a917667cba6 100644 --- a/pkg/skaffold/schema/profiles.go +++ b/pkg/skaffold/schema/profiles.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); 
you may not use this file except in compliance with the License. @@ -18,36 +18,115 @@ package schema import ( "fmt" + "os" "reflect" "strings" + cfg "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + kubectx "github.com/GoogleContainerTools/skaffold/pkg/skaffold/kubernetes/context" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + yamlpatch "github.com/krishicks/yaml-patch" "github.com/pkg/errors" "github.com/sirupsen/logrus" - - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/defaults" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + yaml "gopkg.in/yaml.v2" ) // ApplyProfiles returns configuration modified by the application // of a list of profiles. -func ApplyProfiles(c *latest.SkaffoldPipeline, profiles []string) error { +func ApplyProfiles(c *latest.SkaffoldPipeline, opts *cfg.SkaffoldOptions) error { byName := profilesByName(c.Profiles) + + profiles, err := activatedProfiles(c.Profiles, opts) + if err != nil { + return errors.Wrap(err, "finding auto-activated profiles") + } + for _, name := range profiles { profile, present := byName[name] if !present { return fmt.Errorf("couldn't find profile %s", name) } - applyProfile(c, profile) - } - if err := defaults.Set(c); err != nil { - return errors.Wrap(err, "applying default values") + if err := applyProfile(c, profile); err != nil { + return errors.Wrapf(err, "appying profile %s", name) + } } return nil } -func applyProfile(config *latest.SkaffoldPipeline, profile latest.Profile) { +func activatedProfiles(profiles []latest.Profile, opts *cfg.SkaffoldOptions) ([]string, error) { + activated := opts.Profiles + + // Auto-activated profiles + for _, profile := range profiles { + for _, cond := range profile.Activation { + command := isCommand(cond.Command, opts) + + env, err := isEnv(cond.Env) + if err != nil { + return nil, err + } + + kubeContext, err := isKubeContext(cond.KubeContext) + if err != nil { + return nil, err + } + + if 
command && env && kubeContext { + activated = append(activated, profile.Name) + } + } + } + + return activated, nil +} + +func isEnv(env string) (bool, error) { + if env == "" { + return true, nil + } + + keyValue := strings.SplitN(env, "=", 2) + if len(keyValue) != 2 { + return false, fmt.Errorf("invalid env variable format: %s, should be KEY=VALUE", env) + } + + key := keyValue[0] + value := keyValue[1] + + return satisfies(value, os.Getenv(key)), nil +} + +func isCommand(command string, opts *cfg.SkaffoldOptions) bool { + if command == "" { + return true + } + + return satisfies(command, opts.Command) +} + +func isKubeContext(kubeContext string) (bool, error) { + if kubeContext == "" { + return true, nil + } + + currentKubeContext, err := kubectx.CurrentContext() + if err != nil { + return false, errors.Wrap(err, "getting current cluster context") + } + + return satisfies(kubeContext, currentKubeContext), nil +} + +func satisfies(expected, actual string) bool { + if strings.HasPrefix(expected, "!") { + return actual != expected[1:] + } + return actual == expected +} + +func applyProfile(config *latest.SkaffoldPipeline, profile latest.Profile) error { logrus.Infof("applying profile: %s", profile.Name) // this intentionally removes the Profiles field from the returned config @@ -56,8 +135,41 @@ func applyProfile(config *latest.SkaffoldPipeline, profile latest.Profile) { Kind: config.Kind, Build: overlayProfileField(config.Build, profile.Build).(latest.BuildConfig), Deploy: overlayProfileField(config.Deploy, profile.Deploy).(latest.DeployConfig), - Test: overlayProfileField(config.Test, profile.Test).(latest.TestConfig), + Test: overlayProfileField(config.Test, profile.Test).([]*latest.TestCase), } + + if len(profile.Patches) == 0 { + return nil + } + + // Apply profile patches + buf, err := yaml.Marshal(*config) + if err != nil { + return err + } + + var patches []yamlpatch.Operation + for _, patch := range profile.Patches { + // Default patch operation to 
`replace` + op := patch.Op + if op == "" { + op = "replace" + } + + patches = append(patches, yamlpatch.Operation{ + Op: yamlpatch.Op(op), + Path: yamlpatch.OpPath(patch.Path), + From: yamlpatch.OpPath(patch.From), + Value: patch.Value, + }) + } + + buf, err = yamlpatch.Patch(patches).Apply(buf) + if err != nil { + return err + } + + return yaml.Unmarshal(buf, config) } func profilesByName(profiles []latest.Profile) map[string]latest.Profile { diff --git a/pkg/skaffold/schema/profiles_test.go b/pkg/skaffold/schema/profiles_test.go new file mode 100644 index 00000000000..5be774e1afd --- /dev/null +++ b/pkg/skaffold/schema/profiles_test.go @@ -0,0 +1,376 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema + +import ( + "fmt" + "os" + "testing" + + cfg "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" + yamlpatch "github.com/krishicks/yaml-patch" + "k8s.io/client-go/tools/clientcmd/api" +) + +func TestApplyPatch(t *testing.T) { + config := `build: + artifacts: + - image: example +profiles: +- name: patches + patches: + - path: /build/artifacts/0/image + value: replacement + - op: add + path: /build/artifacts/0/docker + value: + dockerfile: Dockerfile.DEV + - op: add + path: /build/artifacts/- + value: + image: second + docker: + dockerfile: Dockerfile.second +` + + tmp, cleanup := testutil.NewTempDir(t) + defer cleanup() + + yaml := fmt.Sprintf("apiVersion: %s\nkind: Config\n%s", latest.Version, config) + tmp.Write("skaffold.yaml", yaml) + + parsed, err := ParseConfig(tmp.Path("skaffold.yaml"), false) + testutil.CheckError(t, false, err) + + pipeline := parsed.(*latest.SkaffoldPipeline) + err = ApplyProfiles(pipeline, &cfg.SkaffoldOptions{ + Profiles: []string{"patches"}, + }) + testutil.CheckError(t, false, err) + + testutil.CheckDeepEqual(t, "replacement", pipeline.Build.Artifacts[0].ImageName) + testutil.CheckDeepEqual(t, "Dockerfile.DEV", pipeline.Build.Artifacts[0].DockerArtifact.DockerfilePath) + testutil.CheckDeepEqual(t, "Dockerfile.second", pipeline.Build.Artifacts[1].DockerArtifact.DockerfilePath) +} + +func TestApplyProfiles(t *testing.T) { + tests := []struct { + description string + config *latest.SkaffoldPipeline + profile string + expected *latest.SkaffoldPipeline + shouldErr bool + }{ + { + description: "unknown profile", + config: config(), + profile: "profile", + shouldErr: true, + }, + { + description: "build type", + profile: "profile", + config: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + 
withProfiles(latest.Profile{ + Name: "profile", + Build: latest.BuildConfig{ + BuildType: latest.BuildType{ + GoogleCloudBuild: &latest.GoogleCloudBuild{ + ProjectID: "my-project", + DockerImage: "gcr.io/cloud-builders/docker", + MavenImage: "gcr.io/cloud-builders/mvn", + GradleImage: "gcr.io/cloud-builders/gradle", + }, + }, + }, + }), + ), + expected: config( + withGoogleCloudBuild("my-project", + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + ), + }, + { + description: "tag policy", + profile: "dev", + config: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + withProfiles(latest.Profile{ + Name: "dev", + Build: latest.BuildConfig{ + TagPolicy: latest.TagPolicy{ShaTagger: &latest.ShaTagger{}}, + }, + }), + ), + expected: config( + withLocalBuild( + withShaTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + ), + }, + { + description: "artifacts", + profile: "profile", + config: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + withProfiles(latest.Profile{ + Name: "profile", + Build: latest.BuildConfig{ + Artifacts: []*latest.Artifact{ + {ImageName: "image", Workspace: ".", ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + DockerfilePath: "Dockerfile.DEV", + }, + }}, + {ImageName: "imageProd", Workspace: ".", ArtifactType: latest.ArtifactType{ + DockerArtifact: &latest.DockerArtifact{ + DockerfilePath: "Dockerfile.DEV", + }, + }}, + }, + }, + }), + ), + expected: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile.DEV"), + withDockerArtifact("imageProd", ".", "Dockerfile.DEV"), + ), + withKubectlDeploy("k8s/*.yaml"), + ), + }, + { + description: "deploy", + profile: "profile", + config: config( + withLocalBuild( + 
withGitTagger(), + ), + withKubectlDeploy("k8s/*.yaml"), + withProfiles(latest.Profile{ + Name: "profile", + Deploy: latest.DeployConfig{ + DeployType: latest.DeployType{ + HelmDeploy: &latest.HelmDeploy{}, + }, + }, + }), + ), + expected: config( + withLocalBuild( + withGitTagger(), + ), + withHelmDeploy(), + ), + }, + { + description: "patch Dockerfile", + profile: "profile", + config: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + withProfiles(latest.Profile{ + Name: "profile", + Patches: []latest.JSONPatch{{ + Path: "/build/artifacts/0/docker/dockerfile", + Value: yamlpatch.NewNode(str("Dockerfile.DEV")), + }}, + }), + ), + expected: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile.DEV"), + ), + withKubectlDeploy("k8s/*.yaml"), + ), + }, + { + description: "invalid patch path", + profile: "profile", + config: config( + withLocalBuild( + withGitTagger(), + withDockerArtifact("image", ".", "Dockerfile"), + ), + withKubectlDeploy("k8s/*.yaml"), + withProfiles(latest.Profile{ + Name: "profile", + Patches: []latest.JSONPatch{{ + Path: "/unknown", + Op: "replace", + }}, + }), + ), + shouldErr: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + err := ApplyProfiles(test.config, &cfg.SkaffoldOptions{ + Profiles: []string{test.profile}, + }) + + if test.shouldErr { + testutil.CheckError(t, test.shouldErr, err) + } else { + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, test.config) + } + }) + } +} + +func TestActivatedProfiles(t *testing.T) { + tests := []struct { + description string + profiles []latest.Profile + opts *cfg.SkaffoldOptions + expected []string + shouldErr bool + }{ + { + description: "Selected on the command line", + opts: &cfg.SkaffoldOptions{ + Command: "dev", + Profiles: []string{"activated", "also-activated"}, + }, + profiles: []latest.Profile{ + 
{Name: "activated"}, + {Name: "not-activated"}, + {Name: "also-activated"}, + }, + expected: []string{"activated", "also-activated"}, + }, { + description: "Auto-activated by command", + opts: &cfg.SkaffoldOptions{ + Command: "dev", + }, + profiles: []latest.Profile{ + {Name: "run-profile", Activation: []latest.Activation{{Command: "run"}}}, + {Name: "dev-profile", Activation: []latest.Activation{{Command: "dev"}}}, + {Name: "non-run-profile", Activation: []latest.Activation{{Command: "!run"}}}, + }, + expected: []string{"dev-profile", "non-run-profile"}, + }, { + description: "Auto-activated by env variable", + opts: &cfg.SkaffoldOptions{}, + profiles: []latest.Profile{ + {Name: "activated", Activation: []latest.Activation{{Env: "KEY=VALUE"}}}, + {Name: "not-activated", Activation: []latest.Activation{{Env: "KEY=OTHER"}}}, + {Name: "also-activated", Activation: []latest.Activation{{Env: "KEY=!OTHER"}}}, + }, + expected: []string{"activated", "also-activated"}, + }, { + description: "Invalid env variable", + opts: &cfg.SkaffoldOptions{}, + profiles: []latest.Profile{ + {Name: "activated", Activation: []latest.Activation{{Env: "KEY:VALUE"}}}, + }, + shouldErr: true, + }, { + description: "Auto-activated by kube context", + opts: &cfg.SkaffoldOptions{}, + profiles: []latest.Profile{ + {Name: "activated", Activation: []latest.Activation{{KubeContext: "prod-context"}}}, + {Name: "not-activated", Activation: []latest.Activation{{KubeContext: "dev-context"}}}, + {Name: "also-activated", Activation: []latest.Activation{{KubeContext: "!dev-context"}}}, + }, + expected: []string{"activated", "also-activated"}, + }, { + description: "AND between activation criteria", + opts: &cfg.SkaffoldOptions{ + Command: "dev", + }, + profiles: []latest.Profile{ + { + Name: "activated", Activation: []latest.Activation{{ + Env: "KEY=VALUE", + KubeContext: "prod-context", + Command: "dev", + }}, + }, + { + Name: "not-activated", Activation: []latest.Activation{{ + Env: "KEY=VALUE", + 
KubeContext: "prod-context", + Command: "build", + }}, + }, + }, + expected: []string{"activated"}, + }, { + description: "OR between activations", + opts: &cfg.SkaffoldOptions{ + Command: "dev", + }, + profiles: []latest.Profile{ + { + Name: "activated", Activation: []latest.Activation{{ + Command: "run", + }, { + Command: "dev", + }}, + }, + }, + expected: []string{"activated"}, + }, + } + + os.Setenv("KEY", "VALUE") + restore := testutil.SetupFakeKubernetesContext(t, api.Config{CurrentContext: "prod-context"}) + defer restore() + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + + activated, err := activatedProfiles(test.profiles, test.opts) + + testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, activated) + }) + } + +} + +func str(value string) *interface{} { + var v interface{} = value + return &v +} diff --git a/pkg/skaffold/schema/samples_test.go b/pkg/skaffold/schema/samples_test.go new file mode 100644 index 00000000000..73635ac4956 --- /dev/null +++ b/pkg/skaffold/schema/samples_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +const samplesRoot = "../../../docs/content/en/samples" + +func TestParseSamples(t *testing.T) { + paths, err := findSamples(samplesRoot) + if err != nil { + t.Fatalf("unable to read sample files in %q", samplesRoot) + } + + if len(paths) == 0 { + t.Fatalf("did not find sample files in %q", samplesRoot) + } + + tmpDir, teardown := testutil.NewTempDir(t) + defer teardown() + + for _, path := range paths { + name := filepath.Base(path) + + t.Run(name, func(t *testing.T) { + buf, err := ioutil.ReadFile(path) + testutil.CheckError(t, false, err) + + tmpDir.Write(name, addHeader(buf)) + + _, err = ParseConfig(tmpDir.Path(name), true) + testutil.CheckError(t, false, err) + }) + } +} + +func findSamples(root string) ([]string, error) { + var files []string + + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + files = append(files, path) + } + return err + }) + + return files, err +} + +func addHeader(buf []byte) string { + if bytes.HasPrefix(buf, []byte("apiVersion:")) { + return string(buf) + } + return fmt.Sprintf("apiVersion: %s\nkind: Config\n%s", latest.Version, buf) +} diff --git a/pkg/skaffold/schema/util/util.go b/pkg/skaffold/schema/util/util.go index 2577bf5068a..b878af2060c 100644 --- a/pkg/skaffold/schema/util/util.go +++ b/pkg/skaffold/schema/util/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/v1alpha1/config.go b/pkg/skaffold/schema/v1alpha1/config.go index 62b7da07041..75c1c7c9d88 100644 --- a/pkg/skaffold/schema/v1alpha1/config.go +++ b/pkg/skaffold/schema/v1alpha1/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha1/upgrade.go b/pkg/skaffold/schema/v1alpha1/upgrade.go index f23d0cadd68..6262e376187 100644 --- a/pkg/skaffold/schema/v1alpha1/upgrade.go +++ b/pkg/skaffold/schema/v1alpha1/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha1/upgrade_test.go b/pkg/skaffold/schema/v1alpha1/upgrade_test.go index 29c49326698..60db7bf6ce9 100644 --- a/pkg/skaffold/schema/v1alpha1/upgrade_test.go +++ b/pkg/skaffold/schema/v1alpha1/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ build: tagPolicy: gitCommit: {} ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } func TestUpgrade_sha256Tagger(t *testing.T) { @@ -52,7 +52,7 @@ build: tagPolicy: sha256: {} ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } func TestUpgrade_deploy(t *testing.T) { @@ -77,10 +77,10 @@ deploy: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) diff --git a/pkg/skaffold/schema/v1alpha2/config.go b/pkg/skaffold/schema/v1alpha2/config.go index 09490a0aa83..8580230cf96 100644 --- a/pkg/skaffold/schema/v1alpha2/config.go +++ b/pkg/skaffold/schema/v1alpha2/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha2/upgrade.go b/pkg/skaffold/schema/v1alpha2/upgrade.go index 8ace5c8cd44..a48526d23b9 100644 --- a/pkg/skaffold/schema/v1alpha2/upgrade.go +++ b/pkg/skaffold/schema/v1alpha2/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/v1alpha2/upgrade_test.go b/pkg/skaffold/schema/v1alpha2/upgrade_test.go index d6788c43914..3dd23d3dea5 100644 --- a/pkg/skaffold/schema/v1alpha2/upgrade_test.go +++ b/pkg/skaffold/schema/v1alpha2/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ deploy: valuesFiles: - values.yaml ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } func TestUpgrade_kanikoWithProfile(t *testing.T) { @@ -92,10 +92,10 @@ profiles: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) diff --git a/pkg/skaffold/schema/v1alpha3/config.go b/pkg/skaffold/schema/v1alpha3/config.go index f8db3667c43..8c07f549ffa 100644 --- a/pkg/skaffold/schema/v1alpha3/config.go +++ b/pkg/skaffold/schema/v1alpha3/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha3/upgrade.go b/pkg/skaffold/schema/v1alpha3/upgrade.go index 3d6cbbff7f3..fb692be023f 100644 --- a/pkg/skaffold/schema/v1alpha3/upgrade.go +++ b/pkg/skaffold/schema/v1alpha3/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/v1alpha3/upgrade_test.go b/pkg/skaffold/schema/v1alpha3/upgrade_test.go index 3ab3e34ef60..3b4dd6612e3 100644 --- a/pkg/skaffold/schema/v1alpha3/upgrade_test.go +++ b/pkg/skaffold/schema/v1alpha3/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ profiles: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } func TestUpgrade_skipPush(t *testing.T) { @@ -98,10 +98,10 @@ profiles: local: push: true ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) diff --git a/pkg/skaffold/schema/v1alpha4/config.go b/pkg/skaffold/schema/v1alpha4/config.go index 5359f2095b7..2f948e13fcf 100644 --- a/pkg/skaffold/schema/v1alpha4/config.go +++ b/pkg/skaffold/schema/v1alpha4/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha4/upgrade.go b/pkg/skaffold/schema/v1alpha4/upgrade.go index 81c8ebf1a75..87d2289cd85 100644 --- a/pkg/skaffold/schema/v1alpha4/upgrade.go +++ b/pkg/skaffold/schema/v1alpha4/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/v1alpha4/upgrade_test.go b/pkg/skaffold/schema/v1alpha4/upgrade_test.go index a714230e937..318bb6daed5 100644 --- a/pkg/skaffold/schema/v1alpha4/upgrade_test.go +++ b/pkg/skaffold/schema/v1alpha4/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -80,10 +80,10 @@ profiles: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) diff --git a/pkg/skaffold/schema/v1alpha5/config.go b/pkg/skaffold/schema/v1alpha5/config.go index 07c344add41..9b9d70413a8 100644 --- a/pkg/skaffold/schema/v1alpha5/config.go +++ b/pkg/skaffold/schema/v1alpha5/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1alpha5/upgrade.go b/pkg/skaffold/schema/v1alpha5/upgrade.go index 937fa837fcb..a6f1a3ac018 100644 --- a/pkg/skaffold/schema/v1alpha5/upgrade.go +++ b/pkg/skaffold/schema/v1alpha5/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/schema/v1alpha5/upgrade_test.go b/pkg/skaffold/schema/v1alpha5/upgrade_test.go index 3f108104a20..4fc20d97fe4 100644 --- a/pkg/skaffold/schema/v1alpha5/upgrade_test.go +++ b/pkg/skaffold/schema/v1alpha5/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -112,7 +112,7 @@ profiles: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } func upgradeShouldFailt(t *testing.T, input string) { @@ -124,7 +124,7 @@ func upgradeShouldFailt(t *testing.T, input string) { testutil.CheckError(t, true, err) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) diff --git a/pkg/skaffold/schema/v1beta1/config.go b/pkg/skaffold/schema/v1beta1/config.go index fa3f99a330e..54c802ec37e 100644 --- a/pkg/skaffold/schema/v1beta1/config.go +++ b/pkg/skaffold/schema/v1beta1/config.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/schema/v1beta1/upgrade.go b/pkg/skaffold/schema/v1beta1/upgrade.go index 1f74128f0e9..94bf01fed1a 100644 --- a/pkg/skaffold/schema/v1beta1/upgrade.go +++ b/pkg/skaffold/schema/v1beta1/upgrade.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,8 +21,8 @@ import ( "github.com/pkg/errors" - next "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + next "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta2" ) // Upgrade upgrades a configuration to the next version. diff --git a/pkg/skaffold/schema/v1beta1/upgrade_test.go b/pkg/skaffold/schema/v1beta1/upgrade_test.go index efd4532e7d0..b0fe5d216e0 100644 --- a/pkg/skaffold/schema/v1beta1/upgrade_test.go +++ b/pkg/skaffold/schema/v1beta1/upgrade_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package v1beta1 import ( "testing" - "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta2" "github.com/GoogleContainerTools/skaffold/testutil" yaml "gopkg.in/yaml.v2" ) @@ -93,10 +93,10 @@ profiles: manifests: - k8s-* ` - verityUpgrade(t, yaml, expected) + verifyUpgrade(t, yaml, expected) } -func verityUpgrade(t *testing.T, input, output string) { +func verifyUpgrade(t *testing.T, input, output string) { pipeline := NewSkaffoldPipeline() err := yaml.UnmarshalStrict([]byte(input), pipeline) testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) @@ -104,7 +104,7 @@ func verityUpgrade(t *testing.T, input, output string) { upgraded, err := pipeline.Upgrade() testutil.CheckError(t, false, err) - expected := latest.NewSkaffoldPipeline() + expected := v1beta2.NewSkaffoldPipeline() err = yaml.UnmarshalStrict([]byte(output), expected) testutil.CheckErrorAndDeepEqual(t, false, err, expected, upgraded) diff --git a/pkg/skaffold/schema/v1beta2/config.go b/pkg/skaffold/schema/v1beta2/config.go new file mode 100644 index 00000000000..c0e98826bde --- /dev/null +++ 
b/pkg/skaffold/schema/v1beta2/config.go @@ -0,0 +1,272 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" +) + +const Version string = "skaffold/v1beta2" + +// NewSkaffoldPipeline creates a SkaffoldPipeline +func NewSkaffoldPipeline() util.VersionedConfig { + return new(SkaffoldPipeline) +} + +type SkaffoldPipeline struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` + Profiles []Profile `yaml:"profiles,omitempty"` +} + +func (c *SkaffoldPipeline) GetVersion() string { + return c.APIVersion +} + +// BuildConfig contains all the configuration for the build steps +type BuildConfig struct { + Artifacts []*Artifact `yaml:"artifacts,omitempty"` + TagPolicy TagPolicy `yaml:"tagPolicy,omitempty"` + BuildType `yaml:",inline"` +} + +// TagPolicy contains all the configuration for the tagging step +type TagPolicy struct { + GitTagger *GitTagger `yaml:"gitCommit,omitempty" yamltags:"oneOf=tag"` + ShaTagger *ShaTagger `yaml:"sha256,omitempty" yamltags:"oneOf=tag"` + EnvTemplateTagger *EnvTemplateTagger `yaml:"envTemplate,omitempty" yamltags:"oneOf=tag"` + DateTimeTagger *DateTimeTagger `yaml:"dateTime,omitempty" yamltags:"oneOf=tag"` +} + +// ShaTagger contains the configuration for the SHA tagger. 
+type ShaTagger struct{} + +// GitTagger contains the configuration for the git tagger. +type GitTagger struct{} + +// EnvTemplateTagger contains the configuration for the envTemplate tagger. +type EnvTemplateTagger struct { + Template string `yaml:"template,omitempty"` +} + +// DateTimeTagger contains the configuration for the DateTime tagger. +type DateTimeTagger struct { + Format string `yaml:"format,omitempty"` + TimeZone string `yaml:"timezone,omitempty"` +} + +// BuildType contains the specific implementation and parameters needed +// for the build step. Only one field should be populated. +type BuildType struct { + LocalBuild *LocalBuild `yaml:"local,omitempty" yamltags:"oneOf=build"` + GoogleCloudBuild *GoogleCloudBuild `yaml:"googleCloudBuild,omitempty" yamltags:"oneOf=build"` + KanikoBuild *KanikoBuild `yaml:"kaniko,omitempty" yamltags:"oneOf=build"` +} + +// LocalBuild contains the fields needed to do a build on the local docker daemon +// and optionally push to a repository. +type LocalBuild struct { + Push *bool `yaml:"push,omitempty"` + UseDockerCLI bool `yaml:"useDockerCLI,omitempty"` + UseBuildkit bool `yaml:"useBuildkit,omitempty"` +} + +// GoogleCloudBuild contains the fields needed to do a remote build on +// Google Cloud Build. 
+type GoogleCloudBuild struct { + ProjectID string `yaml:"projectId,omitempty"` + DiskSizeGb int64 `yaml:"diskSizeGb,omitempty"` + MachineType string `yaml:"machineType,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + DockerImage string `yaml:"dockerImage,omitempty"` +} + +// LocalDir represents the local directory kaniko build context +type LocalDir struct { +} + +// KanikoBuildContext contains the different fields available to specify +// a kaniko build context +type KanikoBuildContext struct { + GCSBucket string `yaml:"gcsBucket,omitempty" yamltags:"oneOf=buildContext"` + LocalDir *LocalDir `yaml:"localDir,omitempty" yamltags:"oneOf=buildContext"` +} + +// KanikoCache contains fields related to kaniko caching +type KanikoCache struct { + Repo string `yaml:"repo,omitempty"` +} + +// KanikoBuild contains the fields needed to do a on-cluster build using +// the kaniko image +type KanikoBuild struct { + BuildContext *KanikoBuildContext `yaml:"buildContext,omitempty"` + Cache *KanikoCache `yaml:"cache,omitempty"` + AdditionalFlags []string `yaml:"flags,omitempty"` + PullSecret string `yaml:"pullSecret,omitempty"` + PullSecretName string `yaml:"pullSecretName,omitempty"` + Namespace string `yaml:"namespace,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + Image string `yaml:"image,omitempty"` +} + +type TestConfig []*TestCase + +// TestCase is a struct containing all the specified test +// configuration for an image. +type TestCase struct { + ImageName string `yaml:"image"` + StructureTests []string `yaml:"structureTests,omitempty"` +} + +// DeployConfig contains all the configuration needed by the deploy steps +type DeployConfig struct { + DeployType `yaml:",inline"` +} + +// DeployType contains the specific implementation and parameters needed +// for the deploy step. Only one field should be populated. 
+type DeployType struct { + HelmDeploy *HelmDeploy `yaml:"helm,omitempty" yamltags:"oneOf=deploy"` + KubectlDeploy *KubectlDeploy `yaml:"kubectl,omitempty" yamltags:"oneOf=deploy"` + KustomizeDeploy *KustomizeDeploy `yaml:"kustomize,omitempty" yamltags:"oneOf=deploy"` +} + +// KubectlDeploy contains the configuration needed for deploying with `kubectl apply` +type KubectlDeploy struct { + Manifests []string `yaml:"manifests,omitempty"` + RemoteManifests []string `yaml:"remoteManifests,omitempty"` + Flags KubectlFlags `yaml:"flags,omitempty"` +} + +// KubectlFlags describes additional options flags that are passed on the command +// line to kubectl either on every command (Global), on creations (Apply) +// or deletions (Delete). +type KubectlFlags struct { + Global []string `yaml:"global,omitempty"` + Apply []string `yaml:"apply,omitempty"` + Delete []string `yaml:"delete,omitempty"` +} + +// HelmDeploy contains the configuration needed for deploying with helm +type HelmDeploy struct { + Releases []HelmRelease `yaml:"releases,omitempty"` +} + +// KustomizeDeploy contains the configuration needed for deploying with kustomize. 
+type KustomizeDeploy struct { + KustomizePath string `yaml:"path,omitempty"` + Flags KubectlFlags `yaml:"flags,omitempty"` +} + +type HelmRelease struct { + Name string `yaml:"name,omitempty"` + ChartPath string `yaml:"chartPath,omitempty"` + ValuesFiles []string `yaml:"valuesFiles,omitempty"` + Values map[string]string `yaml:"values,omitempty,omitempty"` + Namespace string `yaml:"namespace,omitempty"` + Version string `yaml:"version,omitempty"` + SetValues map[string]string `yaml:"setValues,omitempty"` + SetValueTemplates map[string]string `yaml:"setValueTemplates,omitempty"` + Wait bool `yaml:"wait,omitempty"` + RecreatePods bool `yaml:"recreatePods,omitempty"` + Overrides map[string]interface{} `yaml:"overrides,omitempty"` + Packaged *HelmPackaged `yaml:"packaged,omitempty"` + ImageStrategy HelmImageStrategy `yaml:"imageStrategy,omitempty"` +} + +// HelmPackaged represents parameters for packaging helm chart. +type HelmPackaged struct { + // Version sets the version on the chart to this semver version. + Version string `yaml:"version,omitempty"` + + // AppVersion set the appVersion on the chart to this version + AppVersion string `yaml:"appVersion,omitempty"` +} + +type HelmImageStrategy struct { + HelmImageConfig `yaml:",inline"` +} + +type HelmImageConfig struct { + HelmFQNConfig *HelmFQNConfig `yaml:"fqn,omitempty"` + HelmConventionConfig *HelmConventionConfig `yaml:"helm,omitempty"` +} + +// HelmFQNConfig represents image config to use the FullyQualifiedImageName as param to set +type HelmFQNConfig struct { + Property string `yaml:"property,omitempty"` +} + +// HelmConventionConfig represents image config in the syntax of image.repository and image.tag +type HelmConventionConfig struct { +} + +// Artifact represents items that need to be built, along with the context in which +// they should be built. 
+type Artifact struct { + ImageName string `yaml:"image,omitempty"` + Workspace string `yaml:"context,omitempty"` + Sync map[string]string `yaml:"sync,omitempty"` + ArtifactType `yaml:",inline"` +} + +// Profile is additional configuration that overrides default +// configuration when it is activated. +type Profile struct { + Name string `yaml:"name,omitempty"` + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` +} + +type ArtifactType struct { + DockerArtifact *DockerArtifact `yaml:"docker,omitempty" yamltags:"oneOf=artifact"` + BazelArtifact *BazelArtifact `yaml:"bazel,omitempty" yamltags:"oneOf=artifact"` + JibMavenArtifact *JibMavenArtifact `yaml:"jibMaven,omitempty" yamltags:"oneOf=artifact"` + JibGradleArtifact *JibGradleArtifact `yaml:"jibGradle,omitempty" yamltags:"oneOf=artifact"` +} + +// DockerArtifact describes an artifact built from a Dockerfile, +// usually using `docker build`. +type DockerArtifact struct { + DockerfilePath string `yaml:"dockerfile,omitempty"` + BuildArgs map[string]*string `yaml:"buildArgs,omitempty"` + CacheFrom []string `yaml:"cacheFrom,omitempty"` + Target string `yaml:"target,omitempty"` +} + +// BazelArtifact describes an artifact built with Bazel. 
+type BazelArtifact struct { + BuildTarget string `yaml:"target,omitempty"` + BuildArgs []string `yaml:"args,omitempty"` +} + +type JibMavenArtifact struct { + // Only multi-module + Module string `yaml:"module"` + Profile string `yaml:"profile"` +} + +type JibGradleArtifact struct { + // Only multi-module + Project string `yaml:"project"` +} diff --git a/pkg/skaffold/schema/v1beta2/upgrade.go b/pkg/skaffold/schema/v1beta2/upgrade.go new file mode 100644 index 00000000000..80f6fd13b60 --- /dev/null +++ b/pkg/skaffold/schema/v1beta2/upgrade.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "encoding/json" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + next "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta3" + "github.com/pkg/errors" +) + +// Upgrade upgrades a configuration to the next version. +// Config changes from v1beta2 to v1beta3 +// 1. Additions: +// gcb builder mvn image +// gcb builder gradle image +// 2. No removals +// 3. 
No updates +func (config *SkaffoldPipeline) Upgrade() (util.VersionedConfig, error) { + // convert Deploy (should be the same) + var newDeploy next.DeployConfig + if err := convert(config.Deploy, &newDeploy); err != nil { + return nil, errors.Wrap(err, "converting deploy config") + } + + // convert Profiles (should be the same) + var newProfiles []next.Profile + if config.Profiles != nil { + if err := convert(config.Profiles, &newProfiles); err != nil { + return nil, errors.Wrap(err, "converting new profile") + } + } + // convert Build (should be the same) + var newBuild next.BuildConfig + if err := convert(config.Build, &newBuild); err != nil { + return nil, errors.Wrap(err, "converting new build") + } + + // convert Test (should be the same) + var newTest next.TestConfig + if err := convert(config.Test, &newTest); err != nil { + return nil, errors.Wrap(err, "converting new test") + } + + return &next.SkaffoldPipeline{ + APIVersion: next.Version, + Kind: config.Kind, + Build: newBuild, + Test: newTest, + Deploy: newDeploy, + Profiles: newProfiles, + }, nil +} + +func convert(old interface{}, new interface{}) error { + o, err := json.Marshal(old) + if err != nil { + return errors.Wrap(err, "marshalling old") + } + if err := json.Unmarshal(o, &new); err != nil { + return errors.Wrap(err, "unmarshalling new") + } + return nil +} diff --git a/pkg/skaffold/schema/v1beta2/upgrade_test.go b/pkg/skaffold/schema/v1beta2/upgrade_test.go new file mode 100644 index 00000000000..1fe8dfd0a2c --- /dev/null +++ b/pkg/skaffold/schema/v1beta2/upgrade_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta3" + "github.com/GoogleContainerTools/skaffold/testutil" + yaml "gopkg.in/yaml.v2" +) + +func TestUpgrade(t *testing.T) { + yaml := `apiVersion: skaffold/v1beta2 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + expected := `apiVersion: skaffold/v1beta3 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + verifyUpgrade(t, yaml, expected) +} + +func verifyUpgrade(t *testing.T, input, output string) { + pipeline := NewSkaffoldPipeline() + err 
:= yaml.UnmarshalStrict([]byte(input), pipeline) + testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) + + upgraded, err := pipeline.Upgrade() + testutil.CheckError(t, false, err) + + expected := v1beta3.NewSkaffoldPipeline() + err = yaml.UnmarshalStrict([]byte(output), expected) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected, upgraded) +} diff --git a/pkg/skaffold/schema/v1beta3/config.go b/pkg/skaffold/schema/v1beta3/config.go new file mode 100644 index 00000000000..d99180897e2 --- /dev/null +++ b/pkg/skaffold/schema/v1beta3/config.go @@ -0,0 +1,281 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta3 + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" +) + +const Version string = "skaffold/v1beta3" + +// NewSkaffoldPipeline creates a SkaffoldPipeline +func NewSkaffoldPipeline() util.VersionedConfig { + return new(SkaffoldPipeline) +} + +type SkaffoldPipeline struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` + Profiles []Profile `yaml:"profiles,omitempty"` +} + +func (c *SkaffoldPipeline) GetVersion() string { + return c.APIVersion +} + +// BuildConfig contains all the configuration for the build steps +type BuildConfig struct { + Artifacts []*Artifact `yaml:"artifacts,omitempty"` + TagPolicy TagPolicy `yaml:"tagPolicy,omitempty"` + BuildType `yaml:",inline"` +} + +// TagPolicy contains all the configuration for the tagging step +type TagPolicy struct { + GitTagger *GitTagger `yaml:"gitCommit,omitempty" yamltags:"oneOf=tag"` + ShaTagger *ShaTagger `yaml:"sha256,omitempty" yamltags:"oneOf=tag"` + EnvTemplateTagger *EnvTemplateTagger `yaml:"envTemplate,omitempty" yamltags:"oneOf=tag"` + DateTimeTagger *DateTimeTagger `yaml:"dateTime,omitempty" yamltags:"oneOf=tag"` +} + +// ShaTagger contains the configuration for the SHA tagger. +type ShaTagger struct{} + +// GitTagger contains the configuration for the git tagger. +type GitTagger struct{} + +// EnvTemplateTagger contains the configuration for the envTemplate tagger. +type EnvTemplateTagger struct { + Template string `yaml:"template,omitempty"` +} + +// DateTimeTagger contains the configuration for the DateTime tagger. +type DateTimeTagger struct { + Format string `yaml:"format,omitempty"` + TimeZone string `yaml:"timezone,omitempty"` +} + +// BuildType contains the specific implementation and parameters needed +// for the build step. Only one field should be populated. 
+type BuildType struct { + LocalBuild *LocalBuild `yaml:"local,omitempty" yamltags:"oneOf=build"` + GoogleCloudBuild *GoogleCloudBuild `yaml:"googleCloudBuild,omitempty" yamltags:"oneOf=build"` + KanikoBuild *KanikoBuild `yaml:"kaniko,omitempty" yamltags:"oneOf=build"` +} + +// LocalBuild contains the fields needed to do a build on the local docker daemon +// and optionally push to a repository. +type LocalBuild struct { + Push *bool `yaml:"push,omitempty"` + UseDockerCLI bool `yaml:"useDockerCLI,omitempty"` + UseBuildkit bool `yaml:"useBuildkit,omitempty"` +} + +// GoogleCloudBuild contains the fields needed to do a remote build on +// Google Cloud Build. +type GoogleCloudBuild struct { + ProjectID string `yaml:"projectId,omitempty"` + DiskSizeGb int64 `yaml:"diskSizeGb,omitempty"` + MachineType string `yaml:"machineType,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + DockerImage string `yaml:"dockerImage,omitempty"` + MavenImage string `yaml:"mavenImage,omitempty"` + GradleImage string `yaml:"gradleImage,omitempty"` +} + +// LocalDir represents the local directory kaniko build context +type LocalDir struct { +} + +// KanikoBuildContext contains the different fields available to specify +// a kaniko build context +type KanikoBuildContext struct { + GCSBucket string `yaml:"gcsBucket,omitempty" yamltags:"oneOf=buildContext"` + LocalDir *LocalDir `yaml:"localDir,omitempty" yamltags:"oneOf=buildContext"` +} + +// KanikoCache contains fields related to kaniko caching +type KanikoCache struct { + Repo string `yaml:"repo,omitempty"` +} + +// KanikoBuild contains the fields needed to do a on-cluster build using +// the kaniko image +type KanikoBuild struct { + BuildContext *KanikoBuildContext `yaml:"buildContext,omitempty"` + Cache *KanikoCache `yaml:"cache,omitempty"` + AdditionalFlags []string `yaml:"flags,omitempty"` + PullSecret string `yaml:"pullSecret,omitempty"` + PullSecretName string `yaml:"pullSecretName,omitempty"` + Namespace string 
`yaml:"namespace,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + Image string `yaml:"image,omitempty"` + DockerConfig *DockerConfig `yaml:"dockerConfig,omitempty"` +} + +// DockerConfig contains information about the docker config.json to mount +type DockerConfig struct { + Path string `yaml:"path,omitempty"` + SecretName string `yaml:"secretName,omitempty"` +} + +type TestConfig []*TestCase + +// TestCase is a struct containing all the specified test +// configuration for an image. +type TestCase struct { + ImageName string `yaml:"image"` + StructureTests []string `yaml:"structureTests,omitempty"` +} + +// DeployConfig contains all the configuration needed by the deploy steps +type DeployConfig struct { + DeployType `yaml:",inline"` +} + +// DeployType contains the specific implementation and parameters needed +// for the deploy step. Only one field should be populated. +type DeployType struct { + HelmDeploy *HelmDeploy `yaml:"helm,omitempty" yamltags:"oneOf=deploy"` + KubectlDeploy *KubectlDeploy `yaml:"kubectl,omitempty" yamltags:"oneOf=deploy"` + KustomizeDeploy *KustomizeDeploy `yaml:"kustomize,omitempty" yamltags:"oneOf=deploy"` +} + +// KubectlDeploy contains the configuration needed for deploying with `kubectl apply` +type KubectlDeploy struct { + Manifests []string `yaml:"manifests,omitempty"` + RemoteManifests []string `yaml:"remoteManifests,omitempty"` + Flags KubectlFlags `yaml:"flags,omitempty"` +} + +// KubectlFlags describes additional options flags that are passed on the command +// line to kubectl either on every command (Global), on creations (Apply) +// or deletions (Delete). 
+type KubectlFlags struct {
+	Global []string `yaml:"global,omitempty"`
+	Apply  []string `yaml:"apply,omitempty"`
+	Delete []string `yaml:"delete,omitempty"`
+}
+
+// HelmDeploy contains the configuration needed for deploying with helm
+type HelmDeploy struct {
+	Releases []HelmRelease `yaml:"releases,omitempty"`
+}
+
+// KustomizeDeploy contains the configuration needed for deploying with kustomize.
+type KustomizeDeploy struct {
+	KustomizePath string       `yaml:"path,omitempty"`
+	Flags         KubectlFlags `yaml:"flags,omitempty"`
+}
+
+type HelmRelease struct {
+	Name              string                 `yaml:"name,omitempty"`
+	ChartPath         string                 `yaml:"chartPath,omitempty"`
+	ValuesFiles       []string               `yaml:"valuesFiles,omitempty"`
+	Values            map[string]string      `yaml:"values,omitempty"`
+	Namespace         string                 `yaml:"namespace,omitempty"`
+	Version           string                 `yaml:"version,omitempty"`
+	SetValues         map[string]string      `yaml:"setValues,omitempty"`
+	SetValueTemplates map[string]string      `yaml:"setValueTemplates,omitempty"`
+	Wait              bool                   `yaml:"wait,omitempty"`
+	RecreatePods      bool                   `yaml:"recreatePods,omitempty"`
+	Overrides         map[string]interface{} `yaml:"overrides,omitempty"`
+	Packaged          *HelmPackaged          `yaml:"packaged,omitempty"`
+	ImageStrategy     HelmImageStrategy      `yaml:"imageStrategy,omitempty"`
+}
+
+// HelmPackaged represents parameters for packaging helm chart.
+type HelmPackaged struct {
+	// Version sets the version on the chart to this semver version.
+ Version string `yaml:"version,omitempty"` + + // AppVersion set the appVersion on the chart to this version + AppVersion string `yaml:"appVersion,omitempty"` +} + +type HelmImageStrategy struct { + HelmImageConfig `yaml:",inline"` +} + +type HelmImageConfig struct { + HelmFQNConfig *HelmFQNConfig `yaml:"fqn,omitempty"` + HelmConventionConfig *HelmConventionConfig `yaml:"helm,omitempty"` +} + +// HelmFQNConfig represents image config to use the FullyQualifiedImageName as param to set +type HelmFQNConfig struct { + Property string `yaml:"property,omitempty"` +} + +// HelmConventionConfig represents image config in the syntax of image.repository and image.tag +type HelmConventionConfig struct { +} + +// Artifact represents items that need to be built, along with the context in which +// they should be built. +type Artifact struct { + ImageName string `yaml:"image,omitempty"` + Workspace string `yaml:"context,omitempty"` + Sync map[string]string `yaml:"sync,omitempty"` + ArtifactType `yaml:",inline"` +} + +// Profile is additional configuration that overrides default +// configuration when it is activated. +type Profile struct { + Name string `yaml:"name,omitempty"` + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` +} + +type ArtifactType struct { + DockerArtifact *DockerArtifact `yaml:"docker,omitempty" yamltags:"oneOf=artifact"` + BazelArtifact *BazelArtifact `yaml:"bazel,omitempty" yamltags:"oneOf=artifact"` + JibMavenArtifact *JibMavenArtifact `yaml:"jibMaven,omitempty" yamltags:"oneOf=artifact"` + JibGradleArtifact *JibGradleArtifact `yaml:"jibGradle,omitempty" yamltags:"oneOf=artifact"` +} + +// DockerArtifact describes an artifact built from a Dockerfile, +// usually using `docker build`. 
+type DockerArtifact struct { + DockerfilePath string `yaml:"dockerfile,omitempty"` + BuildArgs map[string]*string `yaml:"buildArgs,omitempty"` + CacheFrom []string `yaml:"cacheFrom,omitempty"` + Target string `yaml:"target,omitempty"` +} + +// BazelArtifact describes an artifact built with Bazel. +type BazelArtifact struct { + BuildTarget string `yaml:"target,omitempty"` + BuildArgs []string `yaml:"args,omitempty"` +} + +type JibMavenArtifact struct { + // Only multi-module + Module string `yaml:"module"` + Profile string `yaml:"profile"` +} + +type JibGradleArtifact struct { + // Only multi-module + Project string `yaml:"project"` +} diff --git a/pkg/skaffold/schema/v1beta3/upgrade.go b/pkg/skaffold/schema/v1beta3/upgrade.go new file mode 100644 index 00000000000..7ce89c851d3 --- /dev/null +++ b/pkg/skaffold/schema/v1beta3/upgrade.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + "encoding/json" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + next "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta4" + "github.com/pkg/errors" +) + +// Upgrade upgrades a configuration to the next version. +// Config changes from v1beta3 to v1beta4 +// 1. Additions: +// 2. No removals +// 3. 
No updates +func (config *SkaffoldPipeline) Upgrade() (util.VersionedConfig, error) { + // convert Deploy (should be the same) + var newDeploy next.DeployConfig + if err := convert(config.Deploy, &newDeploy); err != nil { + return nil, errors.Wrap(err, "converting deploy config") + } + + // convert Profiles (should be the same) + var newProfiles []next.Profile + if config.Profiles != nil { + if err := convert(config.Profiles, &newProfiles); err != nil { + return nil, errors.Wrap(err, "converting new profile") + } + } + // convert Build (should be the same) + var newBuild next.BuildConfig + if err := convert(config.Build, &newBuild); err != nil { + return nil, errors.Wrap(err, "converting new build") + } + + // convert Test (should be the same) + var newTest next.TestConfig + if err := convert(config.Test, &newTest); err != nil { + return nil, errors.Wrap(err, "converting new test") + } + + return &next.SkaffoldPipeline{ + APIVersion: next.Version, + Kind: config.Kind, + Build: newBuild, + Test: newTest, + Deploy: newDeploy, + Profiles: newProfiles, + }, nil +} + +func convert(old interface{}, new interface{}) error { + o, err := json.Marshal(old) + if err != nil { + return errors.Wrap(err, "marshalling old") + } + if err := json.Unmarshal(o, &new); err != nil { + return errors.Wrap(err, "unmarshalling new") + } + return nil +} diff --git a/pkg/skaffold/schema/v1beta3/upgrade_test.go b/pkg/skaffold/schema/v1beta3/upgrade_test.go new file mode 100644 index 00000000000..a88cea0e97e --- /dev/null +++ b/pkg/skaffold/schema/v1beta3/upgrade_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta4" + "github.com/GoogleContainerTools/skaffold/testutil" + yaml "gopkg.in/yaml.v2" +) + +func TestUpgrade(t *testing.T) { + yaml := `apiVersion: skaffold/v1beta3 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + expected := `apiVersion: skaffold/v1beta4 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + verifyUpgrade(t, yaml, expected) +} + +func verifyUpgrade(t *testing.T, input, output string) { + pipeline := NewSkaffoldPipeline() + err 
:= yaml.UnmarshalStrict([]byte(input), pipeline) + testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) + + upgraded, err := pipeline.Upgrade() + testutil.CheckError(t, false, err) + + expected := v1beta4.NewSkaffoldPipeline() + err = yaml.UnmarshalStrict([]byte(output), expected) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected, upgraded) +} diff --git a/pkg/skaffold/schema/v1beta4/config.go b/pkg/skaffold/schema/v1beta4/config.go new file mode 100644 index 00000000000..6bc194f4816 --- /dev/null +++ b/pkg/skaffold/schema/v1beta4/config.go @@ -0,0 +1,292 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta4 + +import ( + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + yamlpatch "github.com/krishicks/yaml-patch" +) + +const Version string = "skaffold/v1beta4" + +// NewSkaffoldPipeline creates a SkaffoldPipeline +func NewSkaffoldPipeline() util.VersionedConfig { + return new(SkaffoldPipeline) +} + +type SkaffoldPipeline struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` + Profiles []Profile `yaml:"profiles,omitempty"` +} + +func (c *SkaffoldPipeline) GetVersion() string { + return c.APIVersion +} + +// BuildConfig contains all the configuration for the build steps +type BuildConfig struct { + Artifacts []*Artifact `yaml:"artifacts,omitempty"` + TagPolicy TagPolicy `yaml:"tagPolicy,omitempty"` + BuildType `yaml:",inline"` +} + +// TagPolicy contains all the configuration for the tagging step +type TagPolicy struct { + GitTagger *GitTagger `yaml:"gitCommit,omitempty" yamltags:"oneOf=tag"` + ShaTagger *ShaTagger `yaml:"sha256,omitempty" yamltags:"oneOf=tag"` + EnvTemplateTagger *EnvTemplateTagger `yaml:"envTemplate,omitempty" yamltags:"oneOf=tag"` + DateTimeTagger *DateTimeTagger `yaml:"dateTime,omitempty" yamltags:"oneOf=tag"` +} + +// ShaTagger contains the configuration for the SHA tagger. +type ShaTagger struct{} + +// GitTagger contains the configuration for the git tagger. +type GitTagger struct{} + +// EnvTemplateTagger contains the configuration for the envTemplate tagger. +type EnvTemplateTagger struct { + Template string `yaml:"template,omitempty"` +} + +// DateTimeTagger contains the configuration for the DateTime tagger. +type DateTimeTagger struct { + Format string `yaml:"format,omitempty"` + TimeZone string `yaml:"timezone,omitempty"` +} + +// BuildType contains the specific implementation and parameters needed +// for the build step. 
Only one field should be populated. +type BuildType struct { + LocalBuild *LocalBuild `yaml:"local,omitempty" yamltags:"oneOf=build"` + GoogleCloudBuild *GoogleCloudBuild `yaml:"googleCloudBuild,omitempty" yamltags:"oneOf=build"` + KanikoBuild *KanikoBuild `yaml:"kaniko,omitempty" yamltags:"oneOf=build"` +} + +// LocalBuild contains the fields needed to do a build on the local docker daemon +// and optionally push to a repository. +type LocalBuild struct { + Push *bool `yaml:"push,omitempty"` + UseDockerCLI bool `yaml:"useDockerCLI,omitempty"` + UseBuildkit bool `yaml:"useBuildkit,omitempty"` +} + +// GoogleCloudBuild contains the fields needed to do a remote build on +// Google Cloud Build. +type GoogleCloudBuild struct { + ProjectID string `yaml:"projectId,omitempty"` + DiskSizeGb int64 `yaml:"diskSizeGb,omitempty"` + MachineType string `yaml:"machineType,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + DockerImage string `yaml:"dockerImage,omitempty"` + MavenImage string `yaml:"mavenImage,omitempty"` + GradleImage string `yaml:"gradleImage,omitempty"` +} + +// LocalDir represents the local directory kaniko build context +type LocalDir struct { +} + +// KanikoBuildContext contains the different fields available to specify +// a kaniko build context +type KanikoBuildContext struct { + GCSBucket string `yaml:"gcsBucket,omitempty" yamltags:"oneOf=buildContext"` + LocalDir *LocalDir `yaml:"localDir,omitempty" yamltags:"oneOf=buildContext"` +} + +// KanikoCache contains fields related to kaniko caching +type KanikoCache struct { + Repo string `yaml:"repo,omitempty"` +} + +// KanikoBuild contains the fields needed to do a on-cluster build using +// the kaniko image +type KanikoBuild struct { + BuildContext *KanikoBuildContext `yaml:"buildContext,omitempty"` + Cache *KanikoCache `yaml:"cache,omitempty"` + AdditionalFlags []string `yaml:"flags,omitempty"` + PullSecret string `yaml:"pullSecret,omitempty"` + PullSecretName string `yaml:"pullSecretName,omitempty"` 
+ Namespace string `yaml:"namespace,omitempty"` + Timeout string `yaml:"timeout,omitempty"` + Image string `yaml:"image,omitempty"` + DockerConfig *DockerConfig `yaml:"dockerConfig,omitempty"` +} + +// DockerConfig contains information about the docker config.json to mount +type DockerConfig struct { + Path string `yaml:"path,omitempty"` + SecretName string `yaml:"secretName,omitempty"` +} + +type TestConfig []*TestCase + +// TestCase is a struct containing all the specified test +// configuration for an image. +type TestCase struct { + ImageName string `yaml:"image"` + StructureTests []string `yaml:"structureTests,omitempty"` +} + +// DeployConfig contains all the configuration needed by the deploy steps +type DeployConfig struct { + DeployType `yaml:",inline"` +} + +// DeployType contains the specific implementation and parameters needed +// for the deploy step. Only one field should be populated. +type DeployType struct { + HelmDeploy *HelmDeploy `yaml:"helm,omitempty" yamltags:"oneOf=deploy"` + KubectlDeploy *KubectlDeploy `yaml:"kubectl,omitempty" yamltags:"oneOf=deploy"` + KustomizeDeploy *KustomizeDeploy `yaml:"kustomize,omitempty" yamltags:"oneOf=deploy"` +} + +// KubectlDeploy contains the configuration needed for deploying with `kubectl apply` +type KubectlDeploy struct { + Manifests []string `yaml:"manifests,omitempty"` + RemoteManifests []string `yaml:"remoteManifests,omitempty"` + Flags KubectlFlags `yaml:"flags,omitempty"` +} + +// KubectlFlags describes additional options flags that are passed on the command +// line to kubectl either on every command (Global), on creations (Apply) +// or deletions (Delete). 
+type KubectlFlags struct {
+	Global []string `yaml:"global,omitempty"`
+	Apply  []string `yaml:"apply,omitempty"`
+	Delete []string `yaml:"delete,omitempty"`
+}
+
+// HelmDeploy contains the configuration needed for deploying with helm
+type HelmDeploy struct {
+	Releases []HelmRelease `yaml:"releases,omitempty"`
+}
+
+// KustomizeDeploy contains the configuration needed for deploying with kustomize.
+type KustomizeDeploy struct {
+	KustomizePath string       `yaml:"path,omitempty"`
+	Flags         KubectlFlags `yaml:"flags,omitempty"`
+}
+
+type HelmRelease struct {
+	Name                  string                 `yaml:"name,omitempty"`
+	ChartPath             string                 `yaml:"chartPath,omitempty"`
+	ValuesFiles           []string               `yaml:"valuesFiles,omitempty"`
+	Values                map[string]string      `yaml:"values,omitempty"`
+	Namespace             string                 `yaml:"namespace,omitempty"`
+	Version               string                 `yaml:"version,omitempty"`
+	SetValues             map[string]string      `yaml:"setValues,omitempty"`
+	SetValueTemplates     map[string]string      `yaml:"setValueTemplates,omitempty"`
+	Wait                  bool                   `yaml:"wait,omitempty"`
+	RecreatePods          bool                   `yaml:"recreatePods,omitempty"`
+	SkipBuildDependencies bool                   `yaml:"skipBuildDependencies,omitempty"`
+	Overrides             map[string]interface{} `yaml:"overrides,omitempty"`
+	Packaged              *HelmPackaged          `yaml:"packaged,omitempty"`
+	ImageStrategy         HelmImageStrategy      `yaml:"imageStrategy,omitempty"`
+}
+
+// HelmPackaged represents parameters for packaging helm chart.
+type HelmPackaged struct {
+	// Version sets the version on the chart to this semver version.
+ Version string `yaml:"version,omitempty"` + + // AppVersion set the appVersion on the chart to this version + AppVersion string `yaml:"appVersion,omitempty"` +} + +type HelmImageStrategy struct { + HelmImageConfig `yaml:",inline"` +} + +type HelmImageConfig struct { + HelmFQNConfig *HelmFQNConfig `yaml:"fqn,omitempty"` + HelmConventionConfig *HelmConventionConfig `yaml:"helm,omitempty"` +} + +// HelmFQNConfig represents image config to use the FullyQualifiedImageName as param to set +type HelmFQNConfig struct { + Property string `yaml:"property,omitempty"` +} + +// HelmConventionConfig represents image config in the syntax of image.repository and image.tag +type HelmConventionConfig struct { +} + +// Artifact represents items that need to be built, along with the context in which +// they should be built. +type Artifact struct { + ImageName string `yaml:"image,omitempty"` + Workspace string `yaml:"context,omitempty"` + Sync map[string]string `yaml:"sync,omitempty"` + ArtifactType `yaml:",inline"` +} + +// Profile is additional configuration that overrides default +// configuration when it is activated. +type Profile struct { + Name string `yaml:"name,omitempty"` + Build BuildConfig `yaml:"build,omitempty"` + Test TestConfig `yaml:"test,omitempty"` + Deploy DeployConfig `yaml:"deploy,omitempty"` + Patches yamlpatch.Patch `yaml:"patches,omitempty"` + Activation []Activation `yaml:"activation,omitempty"` +} + +// Activation defines criteria to auto-activate a profile. 
+type Activation struct { + Env string `yaml:"env,omitempty"` + KubeContext string `yaml:"kubeContext,omitempty"` + Command string `yaml:"command,omitempty"` +} + +type ArtifactType struct { + DockerArtifact *DockerArtifact `yaml:"docker,omitempty" yamltags:"oneOf=artifact"` + BazelArtifact *BazelArtifact `yaml:"bazel,omitempty" yamltags:"oneOf=artifact"` + JibMavenArtifact *JibMavenArtifact `yaml:"jibMaven,omitempty" yamltags:"oneOf=artifact"` + JibGradleArtifact *JibGradleArtifact `yaml:"jibGradle,omitempty" yamltags:"oneOf=artifact"` +} + +// DockerArtifact describes an artifact built from a Dockerfile, +// usually using `docker build`. +type DockerArtifact struct { + DockerfilePath string `yaml:"dockerfile,omitempty"` + BuildArgs map[string]*string `yaml:"buildArgs,omitempty"` + CacheFrom []string `yaml:"cacheFrom,omitempty"` + Target string `yaml:"target,omitempty"` +} + +// BazelArtifact describes an artifact built with Bazel. +type BazelArtifact struct { + BuildTarget string `yaml:"target,omitempty"` + BuildArgs []string `yaml:"args,omitempty"` +} + +type JibMavenArtifact struct { + // Only multi-module + Module string `yaml:"module"` + Profile string `yaml:"profile"` +} + +type JibGradleArtifact struct { + // Only multi-module + Project string `yaml:"project"` +} diff --git a/pkg/skaffold/schema/v1beta4/upgrade.go b/pkg/skaffold/schema/v1beta4/upgrade.go new file mode 100644 index 00000000000..730de8a63e6 --- /dev/null +++ b/pkg/skaffold/schema/v1beta4/upgrade.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta4 + +import ( + "encoding/json" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + next "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/util" + "github.com/pkg/errors" +) + +// Upgrade upgrades a configuration to the next version. +// Config changes from v1beta4 to v1beta5 +// 1. Additions: +// 2. No removals +// 3. No updates +func (config *SkaffoldPipeline) Upgrade() (util.VersionedConfig, error) { + // convert Deploy (should be the same) + var newDeploy next.DeployConfig + if err := convert(config.Deploy, &newDeploy); err != nil { + return nil, errors.Wrap(err, "converting deploy config") + } + + // convert Profiles (should be the same) + var newProfiles []next.Profile + if config.Profiles != nil { + if err := convert(config.Profiles, &newProfiles); err != nil { + return nil, errors.Wrap(err, "converting new profile") + } + } + // convert Build (should be the same) + var newBuild next.BuildConfig + if err := convert(config.Build, &newBuild); err != nil { + return nil, errors.Wrap(err, "converting new build") + } + + // convert Test (should be the same) + var newTest []*latest.TestCase + if err := convert(config.Test, &newTest); err != nil { + return nil, errors.Wrap(err, "converting new test") + } + + return &next.SkaffoldPipeline{ + APIVersion: next.Version, + Kind: config.Kind, + Build: newBuild, + Test: newTest, + Deploy: newDeploy, + Profiles: newProfiles, + }, nil +} + +func convert(old interface{}, new interface{}) error { + o, err := json.Marshal(old) + if err != nil { + return errors.Wrap(err, "marshalling old") + } + if err := json.Unmarshal(o, &new); err != nil { + return errors.Wrap(err, "unmarshalling new") + } + return nil +} diff --git a/pkg/skaffold/schema/v1beta4/upgrade_test.go 
b/pkg/skaffold/schema/v1beta4/upgrade_test.go new file mode 100644 index 00000000000..9e4d9fd965f --- /dev/null +++ b/pkg/skaffold/schema/v1beta4/upgrade_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta4 + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/testutil" + yaml "gopkg.in/yaml.v2" +) + +func TestUpgrade(t *testing.T) { + yaml := `apiVersion: skaffold/v1beta4 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + expected := `apiVersion: skaffold/v1beta5 +kind: Config +build: + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example +test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* +deploy: + kubectl: + manifests: + - k8s-* +profiles: + - name: test profile + build: + kaniko: + buildContext: + gcsBucket: skaffold-kaniko + pullSecretName: e2esecret + namespace: 
default + cache: {} + artifacts: + - image: gcr.io/k8s-skaffold/skaffold-example + test: + - image: gcr.io/k8s-skaffold/skaffold-example + structureTests: + - ./test/* + deploy: + kubectl: + manifests: + - k8s-* +` + verifyUpgrade(t, yaml, expected) +} + +func verifyUpgrade(t *testing.T, input, output string) { + pipeline := NewSkaffoldPipeline() + err := yaml.UnmarshalStrict([]byte(input), pipeline) + testutil.CheckErrorAndDeepEqual(t, false, err, Version, pipeline.GetVersion()) + + upgraded, err := pipeline.Upgrade() + testutil.CheckError(t, false, err) + + expected := latest.NewSkaffoldPipeline() + err = yaml.UnmarshalStrict([]byte(output), expected) + + testutil.CheckErrorAndDeepEqual(t, false, err, expected, upgraded) +} diff --git a/pkg/skaffold/schema/versions.go b/pkg/skaffold/schema/versions.go index 2cf8c005f89..eb41407ae4e 100644 --- a/pkg/skaffold/schema/versions.go +++ b/pkg/skaffold/schema/versions.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -32,6 +32,9 @@ import ( "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1alpha4" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1alpha5" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta1" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta2" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta3" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/v1beta4" misc "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/yamltags" ) @@ -40,28 +43,31 @@ type APIVersion struct { Version string `yaml:"apiVersion"` } -var schemaVersions = versions{ +var SchemaVersions = Versions{ {v1alpha1.Version, v1alpha1.NewSkaffoldPipeline}, {v1alpha2.Version, v1alpha2.NewSkaffoldPipeline}, {v1alpha3.Version, v1alpha3.NewSkaffoldPipeline}, {v1alpha4.Version, v1alpha4.NewSkaffoldPipeline}, {v1alpha5.Version, v1alpha5.NewSkaffoldPipeline}, {v1beta1.Version, v1beta1.NewSkaffoldPipeline}, + {v1beta2.Version, v1beta2.NewSkaffoldPipeline}, + {v1beta3.Version, v1beta3.NewSkaffoldPipeline}, + {v1beta4.Version, v1beta4.NewSkaffoldPipeline}, {latest.Version, latest.NewSkaffoldPipeline}, } -type version struct { - apiVersion string - factory func() util.VersionedConfig +type Version struct { + APIVersion string + Factory func() util.VersionedConfig } -type versions []version +type Versions []Version // Find search the constructor for a given api version. 
-func (v *versions) Find(apiVersion string) (func() util.VersionedConfig, bool) { +func (v *Versions) Find(apiVersion string) (func() util.VersionedConfig, bool) { for _, version := range *v { - if version.apiVersion == apiVersion { - return version.factory, true + if version.APIVersion == apiVersion { + return version.Factory, true } } @@ -80,7 +86,7 @@ func ParseConfig(filename string, upgrade bool) (util.VersionedConfig, error) { return nil, errors.Wrap(err, "parsing api version") } - factory, present := schemaVersions.Find(apiVersion.Version) + factory, present := SchemaVersions.Find(apiVersion.Version) if !present { return nil, errors.Errorf("unknown api version: '%s'", apiVersion.Version) } @@ -119,7 +125,7 @@ func upgradeToLatest(vc util.VersionedConfig) (util.VersionedConfig, error) { return vc, nil } if version.GT(semver) { - return nil, fmt.Errorf("config version %s is too new for this version of skaffold: upgrade skaffold", vc.GetVersion()) + return nil, fmt.Errorf("config version %s is too new for this version: upgrade Skaffold", vc.GetVersion()) } logrus.Warnf("config version (%s) out of date: upgrading to latest (%s)", vc.GetVersion(), latest.Version) diff --git a/pkg/skaffold/schema/versions_test.go b/pkg/skaffold/schema/versions_test.go index 371fda07163..ad09e69e705 100644 --- a/pkg/skaffold/schema/versions_test.go +++ b/pkg/skaffold/schema/versions_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -88,6 +88,9 @@ build: pullSecretName: secret-name namespace: nskaniko timeout: 120m + dockerConfig: + secretName: config-name + path: /kaniko/.docker ` badConfig = "bad config" ) @@ -169,6 +172,7 @@ func TestParseConfig(t *testing.T) { expected: config( withKanikoBuild("demo", "secret-name", "nskaniko", "/secret.json", "120m", withGitTagger(), + withDockerConfig("config-name", "/kaniko/.docker"), ), withKubectlDeploy("k8s/*.yaml"), ), @@ -237,6 +241,8 @@ func withGoogleCloudBuild(id string, ops ...func(*latest.BuildConfig)) func(*lat b := latest.BuildConfig{BuildType: latest.BuildType{GoogleCloudBuild: &latest.GoogleCloudBuild{ ProjectID: id, DockerImage: "gcr.io/cloud-builders/docker", + MavenImage: "gcr.io/cloud-builders/mvn", + GradleImage: "gcr.io/cloud-builders/gradle", }}} for _, op := range ops { op(&b) @@ -264,6 +270,15 @@ func withKanikoBuild(bucket, secretName, namespace, secret string, timeout strin } } +func withDockerConfig(secretName string, path string) func(*latest.BuildConfig) { + return func(cfg *latest.BuildConfig) { + cfg.KanikoBuild.DockerConfig = &latest.DockerConfig{ + SecretName: secretName, + Path: path, + } + } +} + func withKubectlDeploy(manifests ...string) func(*latest.SkaffoldPipeline) { return func(cfg *latest.SkaffoldPipeline) { cfg.Deploy = latest.DeployConfig{ @@ -333,22 +348,22 @@ func withProfiles(profiles ...latest.Profile) func(*latest.SkaffoldPipeline) { } func TestUpgradeToNextVersion(t *testing.T) { - for i, schemaVersion := range schemaVersions[0 : len(schemaVersions)-2] { + for i, schemaVersion := range SchemaVersions[0 : len(SchemaVersions)-2] { from := schemaVersion - to := schemaVersions[i+1] - description := fmt.Sprintf("Upgrade from %s to %s", from.apiVersion, to.apiVersion) + to := SchemaVersions[i+1] + description := fmt.Sprintf("Upgrade from %s to %s", from.APIVersion, to.APIVersion) t.Run(description, func(t *testing.T) { - factory, _ := schemaVersions.Find(from.apiVersion) + factory, _ := 
SchemaVersions.Find(from.APIVersion) newer, err := factory().Upgrade() - testutil.CheckErrorAndDeepEqual(t, false, err, to.apiVersion, newer.GetVersion()) + testutil.CheckErrorAndDeepEqual(t, false, err, to.APIVersion, newer.GetVersion()) }) } } func TestCantUpgradeFromLatestVersion(t *testing.T) { - factory, present := schemaVersions.Find(latest.Version) + factory, present := SchemaVersions.Find(latest.Version) testutil.CheckDeepEqual(t, true, present) _, err := factory().Upgrade() diff --git a/pkg/skaffold/sources/upload.go b/pkg/skaffold/sources/upload.go new file mode 100644 index 00000000000..2c2d44d3738 --- /dev/null +++ b/pkg/skaffold/sources/upload.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sources + +import ( + "context" + "io" + + cstorage "cloud.google.com/go/storage" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" + "github.com/pkg/errors" +) + +// TarGz creates a .tgz archive of the artifact's sources. +func TarGz(ctx context.Context, w io.Writer, a *latest.Artifact, dependencies []string) error { + if err := util.CreateTarGz(w, a.Workspace, dependencies); err != nil { + return errors.Wrap(err, "creating tar gz") + } + + return nil +} + +// UploadToGCS uploads the artifact's sources to a GCS bucket. 
+func UploadToGCS(ctx context.Context, a *latest.Artifact, bucket, objectName string, dependencies []string) error { + c, err := cstorage.NewClient(ctx) + if err != nil { + return errors.Wrap(err, "creating GCS client") + } + defer c.Close() + + w := c.Bucket(bucket).Object(objectName).NewWriter(ctx) + if err := TarGz(ctx, w, a, dependencies); err != nil { + return errors.Wrap(err, "uploading targz to google storage") + } + + return w.Close() +} diff --git a/pkg/skaffold/sync/kubectl/kubectl.go b/pkg/skaffold/sync/kubectl/kubectl.go index 0359b4219b3..6ba56f2f1ba 100644 --- a/pkg/skaffold/sync/kubectl/kubectl.go +++ b/pkg/skaffold/sync/kubectl/kubectl.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,48 +18,70 @@ package kubectl import ( "context" - "fmt" + "io" "os/exec" - "path/filepath" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/sync" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" ) -type Syncer struct{} +type Syncer struct { + namespaces []string +} -var syncedDirs = map[string]struct{}{} +func NewSyncer(namespaces []string) *Syncer { + return &Syncer{ + namespaces: namespaces, + } +} func (k *Syncer) Sync(ctx context.Context, s *sync.Item) error { - logrus.Infoln("Copying files:", s.Copy, "to", s.Image) + if len(s.Copy) > 0 { + logrus.Infoln("Copying files:", s.Copy, "to", s.Image) - if err := sync.Perform(ctx, s.Image, s.Copy, copyFileFn); err != nil { - return errors.Wrap(err, "copying files") + if err := sync.Perform(ctx, s.Image, s.Copy, copyFileFn, k.namespaces); err != nil { + return errors.Wrap(err, "copying files") + } } - logrus.Infoln("Deleting files:", s.Delete, "from", s.Image) + if len(s.Delete) > 0 { + logrus.Infoln("Deleting files:", s.Delete, "from", s.Image) - 
if err := sync.Perform(ctx, s.Image, s.Delete, deleteFileFn); err != nil { - return errors.Wrap(err, "deleting files") + if err := sync.Perform(ctx, s.Image, s.Delete, deleteFileFn, k.namespaces); err != nil { + return errors.Wrap(err, "deleting files") + } } return nil } -func deleteFileFn(ctx context.Context, pod v1.Pod, container v1.Container, src, dst string) []*exec.Cmd { - delete := exec.CommandContext(ctx, "kubectl", "exec", pod.Name, "--namespace", pod.Namespace, "-c", container.Name, "--", "rm", "-rf", dst) +func deleteFileFn(ctx context.Context, pod v1.Pod, container v1.Container, files map[string]string) []*exec.Cmd { + // "kubectl" is below... + deleteCmd := []string{"exec", pod.Name, "--namespace", pod.Namespace, "-c", container.Name, "--", "rm", "-rf"} + args := make([]string, 0, len(deleteCmd)+len(files)) + args = append(args, deleteCmd...) + for _, dst := range files { + args = append(args, dst) + } + delete := exec.CommandContext(ctx, "kubectl", args...) return []*exec.Cmd{delete} } -func copyFileFn(ctx context.Context, pod v1.Pod, container v1.Container, src, dst string) []*exec.Cmd { - dir := filepath.Dir(dst) - var cmds []*exec.Cmd - if _, ok := syncedDirs[dir]; !ok { - cmds = []*exec.Cmd{exec.CommandContext(ctx, "kubectl", "exec", pod.Name, "-c", container.Name, "-n", pod.Namespace, "--", "mkdir", "-p", dir)} - syncedDirs[dir] = struct{}{} - } - copy := exec.CommandContext(ctx, "kubectl", "cp", src, fmt.Sprintf("%s/%s:%s", pod.Namespace, pod.Name, dst), "-c", container.Name) - return append(cmds, copy) +func copyFileFn(ctx context.Context, pod v1.Pod, container v1.Container, files map[string]string) []*exec.Cmd { + // Use "m" flag to touch the files as they are copied. 
+ reader, writer := io.Pipe() + copy := exec.CommandContext(ctx, "kubectl", "exec", pod.Name, "--namespace", pod.Namespace, "-c", container.Name, "-i", + "--", "tar", "xmf", "-", "-C", "/", "--no-same-owner") + copy.Stdin = reader + go func() { + defer writer.Close() + + if err := util.CreateMappedTar(writer, "/", files); err != nil { + logrus.Errorln("Error creating tar archive:", err) + } + }() + return []*exec.Cmd{copy} } diff --git a/pkg/skaffold/sync/sync.go b/pkg/skaffold/sync/sync.go index ddc9c5c5654..263cd5abc73 100644 --- a/pkg/skaffold/sync/sync.go +++ b/pkg/skaffold/sync/sync.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -130,7 +130,7 @@ func intersect(context string, syncMap map[string]string, files []string) (map[s return ret, nil } -func Perform(ctx context.Context, image string, files map[string]string, cmdFn func(context.Context, v1.Pod, v1.Container, string, string) []*exec.Cmd) error { +func Perform(ctx context.Context, image string, files map[string]string, cmdFn func(context.Context, v1.Pod, v1.Container, map[string]string) []*exec.Cmd, namespaces []string) error { if len(files) == 0 { return nil } @@ -140,33 +140,32 @@ func Perform(ctx context.Context, image string, files map[string]string, cmdFn f return errors.Wrap(err, "getting k8s client") } - pods, err := client.CoreV1().Pods("").List(meta_v1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "getting pods") - } - - synced := map[string]bool{} + numSynced := 0 + for _, ns := range namespaces { + pods, err := client.CoreV1().Pods(ns).List(meta_v1.ListOptions{}) + if err != nil { + return errors.Wrap(err, "getting pods for namespace "+ns) + } - for _, p := range pods.Items { - for _, c := range p.Spec.Containers { - if c.Image != image { - continue - } + for _, p := range pods.Items { + for _, c := range 
p.Spec.Containers { + if c.Image != image { + continue + } - for src, dst := range files { - cmds := cmdFn(ctx, p, c, src, dst) + cmds := cmdFn(ctx, p, c, files) for _, cmd := range cmds { if err := util.RunCmd(cmd); err != nil { return err } + numSynced++ } - synced[src] = true } } } - if len(synced) != len(files) { - return errors.New("couldn't sync all the files") + if numSynced == 0 { + return errors.New("didn't sync any files") } return nil diff --git a/pkg/skaffold/sync/sync_test.go b/pkg/skaffold/sync/sync_test.go index 03ff9a85b24..5fa43783b6b 100644 --- a/pkg/skaffold/sync/sync_test.go +++ b/pkg/skaffold/sync/sync_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -344,8 +344,14 @@ func (t *TestCmdRecorder) RunCmdOut(cmd *exec.Cmd) ([]byte, error) { return nil, t.RunCmd(cmd) } -func fakeCmd(ctx context.Context, p v1.Pod, c v1.Container, src, dst string) []*exec.Cmd { - return []*exec.Cmd{exec.CommandContext(ctx, "copy", src, dst)} +func fakeCmd(ctx context.Context, p v1.Pod, c v1.Container, files map[string]string) []*exec.Cmd { + cmds := make([]*exec.Cmd, len(files)) + i := 0 + for src, dst := range files { + cmds[i] = exec.CommandContext(ctx, "copy", src, dst) + i++ + } + return cmds } var pod = &v1.Pod{ @@ -371,7 +377,7 @@ func TestPerform(t *testing.T) { description string image string files map[string]string - cmdFn func(context.Context, v1.Pod, v1.Container, string, string) []*exec.Cmd + cmdFn func(context.Context, v1.Pod, v1.Container, map[string]string) []*exec.Cmd cmdErr error clientErr error expected []string @@ -422,7 +428,7 @@ func TestPerform(t *testing.T) { util.DefaultExecCommand = cmdRecord - err := Perform(context.Background(), test.image, test.files, test.cmdFn) + err := Perform(context.Background(), test.image, test.files, test.cmdFn, []string{""}) 
testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, cmdRecord.cmds) }) diff --git a/pkg/skaffold/test/structure/structure.go b/pkg/skaffold/test/structure/structure.go index ba01f4c25e9..717c2cdbc67 100644 --- a/pkg/skaffold/test/structure/structure.go +++ b/pkg/skaffold/test/structure/structure.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/test/structure/types.go b/pkg/skaffold/test/structure/types.go index 11727061213..fe426af993f 100644 --- a/pkg/skaffold/test/structure/types.go +++ b/pkg/skaffold/test/structure/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/test/test.go b/pkg/skaffold/test/test.go index 0ba68fd4cfa..101af129789 100644 --- a/pkg/skaffold/test/test.go +++ b/pkg/skaffold/test/test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -32,7 +32,7 @@ import ( // NewTester parses the provided test cases from the Skaffold config, // and returns a Tester instance with all the necessary test runners // to run all specified tests. 
-func NewTester(testCases *latest.TestConfig) (Tester, error) { +func NewTester(testCases []*latest.TestCase) (Tester, error) { // TODO(nkubala): copied this from runner.getDeployer(), this should be moved somewhere else cwd, err := os.Getwd() if err != nil { @@ -49,7 +49,7 @@ func NewTester(testCases *latest.TestConfig) (Tester, error) { func (t FullTester) TestDependencies() ([]string, error) { var deps []string - for _, test := range *t.testCases { + for _, test := range t.testCases { if test.StructureTests == nil { continue } @@ -68,7 +68,7 @@ func (t FullTester) TestDependencies() ([]string, error) { // Test is the top level testing execution call. It serves as the // entrypoint to all individual tests. func (t FullTester) Test(ctx context.Context, out io.Writer, bRes []build.Artifact) error { - for _, test := range *t.testCases { + for _, test := range t.testCases { if err := t.runStructureTests(ctx, out, bRes, test); err != nil { return errors.Wrap(err, "running structure tests") } diff --git a/pkg/skaffold/test/types.go b/pkg/skaffold/test/types.go index f02f7a651c6..9a6078ed2da 100644 --- a/pkg/skaffold/test/types.go +++ b/pkg/skaffold/test/types.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,7 +42,7 @@ type Tester interface { // FullTester should always be the ONLY implementation of the Tester interface; // newly added testing implementations should implement the Runner interface. 
type FullTester struct { - testCases *latest.TestConfig + testCases []*latest.TestCase workingDir string } diff --git a/pkg/skaffold/update/update.go b/pkg/skaffold/update/update.go index cca69eb9913..eb706cf6905 100644 --- a/pkg/skaffold/update/update.go +++ b/pkg/skaffold/update/update.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ limitations under the License. package update import ( - "context" "io/ioutil" "net/http" "os" @@ -43,23 +42,29 @@ func IsUpdateCheckEnabled() bool { return v == "" || strings.ToLower(v) == "true" } -// GetLatestVersion uses a VERSION file stored on GCS to determine the latest released version of skaffold -func GetLatestVersion(ctx context.Context) (semver.Version, error) { +// GetLatestAndCurrentVersion uses a VERSION file stored on GCS to determine the latest released version +// and returns it with the current version of Skaffold +func GetLatestAndCurrentVersion() (semver.Version, semver.Version, error) { + none := semver.Version{} resp, err := http.Get(latestVersionURL) if err != nil { - return semver.Version{}, errors.Wrap(err, "getting latest version info from GCS") + return none, none, errors.Wrap(err, "getting latest version info from GCS") } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return semver.Version{}, errors.Wrapf(err, "http %d, error: %s", resp.StatusCode, resp.Status) + return none, none, errors.Wrapf(err, "http %d, error: %s", resp.StatusCode, resp.Status) } versionBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - return semver.Version{}, errors.Wrap(err, "reading version file from GCS") + return none, none, errors.Wrap(err, "reading version file from GCS") } - v, err := version.ParseVersion(string(versionBytes)) + latest, err := version.ParseVersion(string(versionBytes)) if err != nil { - return 
semver.Version{}, errors.Wrap(err, "parsing latest version from GCS") + return none, none, errors.Wrap(err, "parsing latest version from GCS") } - return v, nil + current, err := version.ParseVersion(version.Get().Version) + if err != nil { + return none, none, errors.Wrap(err, "parsing current semver, skipping update check") + } + return latest, current, nil } diff --git a/pkg/skaffold/util/cmd.go b/pkg/skaffold/util/cmd.go index 5d05e480e04..6fb29f65ae6 100644 --- a/pkg/skaffold/util/cmd.go +++ b/pkg/skaffold/util/cmd.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/cmd_test.go b/pkg/skaffold/util/cmd_test.go index 940d054eb6e..5b92eda10e3 100644 --- a/pkg/skaffold/util/cmd_test.go +++ b/pkg/skaffold/util/cmd_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/env_template.go b/pkg/skaffold/util/env_template.go index d1238d94048..2895ed03bb9 100644 --- a/pkg/skaffold/util/env_template.go +++ b/pkg/skaffold/util/env_template.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/util/env_template_test.go b/pkg/skaffold/util/env_template_test.go index 97ef3e58bc7..71324faa576 100644 --- a/pkg/skaffold/util/env_template_test.go +++ b/pkg/skaffold/util/env_template_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/image.go b/pkg/skaffold/util/image.go index 333f6b685b5..82048ba2d2b 100644 --- a/pkg/skaffold/util/image.go +++ b/pkg/skaffold/util/image.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/image_test.go b/pkg/skaffold/util/image_test.go index a503d3298ce..e72e4eaa7fb 100644 --- a/pkg/skaffold/util/image_test.go +++ b/pkg/skaffold/util/image_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/tar.go b/pkg/skaffold/util/tar.go index af3b8717845..6e2ad32c131 100644 --- a/pkg/skaffold/util/tar.go +++ b/pkg/skaffold/util/tar.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -27,18 +27,25 @@ import ( "github.com/sirupsen/logrus" ) -func CreateTar(w io.Writer, root string, paths []string) error { +func CreateMappedTar(w io.Writer, root string, pathMap map[string]string) error { tw := tar.NewWriter(w) defer tw.Close() - for _, p := range paths { - tarPath := filepath.ToSlash(p) - - if !filepath.IsAbs(p) { - p = filepath.Join(root, p) + for src, dst := range pathMap { + if err := addFileToTar(root, src, dst, tw); err != nil { + return err } + } + + return nil +} - if err := addFileToTar(p, tarPath, tw); err != nil { +func CreateTar(w io.Writer, root string, paths []string) error { + tw := tar.NewWriter(w) + defer tw.Close() + + for _, path := range paths { + if err := addFileToTar(root, path, "", tw); err != nil { return err } } @@ -52,8 +59,36 @@ func CreateTarGz(w io.Writer, root string, paths []string) error { return CreateTar(gw, root, paths) } -func addFileToTar(p string, tarPath string, tw *tar.Writer) error { - fi, err := os.Lstat(p) +func addFileToTar(root string, src string, dst string, tw *tar.Writer) error { + var ( + absPath string + err error + ) + + absRoot, err := filepath.Abs(root) + if err != nil { + return err + } + + if filepath.IsAbs(src) { + absPath = src + } else { + absPath, err = filepath.Abs(src) + if err != nil { + return err + } + } + + tarPath := dst + if tarPath == "" { + tarPath, err = filepath.Rel(absRoot, absPath) + if err != nil { + return err + } + } + tarPath = filepath.ToSlash(tarPath) + + fi, err := os.Lstat(absPath) if err != nil { return err } @@ -68,22 +103,22 @@ func addFileToTar(p string, tarPath string, tw *tar.Writer) error { if err := tw.WriteHeader(tarHeader); err != nil { return err } - f, err := os.Open(p) + f, err := os.Open(absPath) if err != nil { return err } defer f.Close() if _, err := io.Copy(tw, f); err != nil { - return errors.Wrapf(err, "writing real file %s", p) + return errors.Wrapf(err, "writing real file %s", absPath) } case (mode & os.ModeSymlink) != 0: - target, err := 
os.Readlink(p) + target, err := os.Readlink(absPath) if err != nil { return err } if filepath.IsAbs(target) { - logrus.Warnf("Skipping %s. Only relative symlinks are supported.", p) + logrus.Warnf("Skipping %s. Only relative symlinks are supported.", absPath) return nil } @@ -96,7 +131,7 @@ func addFileToTar(p string, tarPath string, tw *tar.Writer) error { return err } default: - logrus.Warnf("Adding possibly unsupported file %s of type %s.", p, mode) + logrus.Warnf("Adding possibly unsupported file %s of type %s.", absPath, mode) // Try to add it anyway? tarHeader, err := tar.FileInfoHeader(fi, "") if err != nil { diff --git a/pkg/skaffold/util/tar_non_windows_test.go b/pkg/skaffold/util/tar_non_windows_test.go index 01e5fcd4ef9..f28a1465bae 100644 --- a/pkg/skaffold/util/tar_non_windows_test.go +++ b/pkg/skaffold/util/tar_non_windows_test.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -59,16 +59,19 @@ func Test_addLinksToTar(t *testing.T) { } } + reset := testutil.Chdir(t, tmpDir.Root()) + defer reset() + // Add all the files and links to a tar. 
var b bytes.Buffer tw := tar.NewWriter(&b) for p := range files { - if err := addFileToTar(tmpDir.Path(p), p, tw); err != nil { + if err := addFileToTar(".", p, tw); err != nil { t.Fatalf("addFileToTar() error = %v", err) } } for l := range links { - if err := addFileToTar(tmpDir.Path(l), l, tw); err != nil { + if err := addFileToTar(".", l, tw); err != nil { t.Fatalf("addFileToTar() error = %v", err) } } diff --git a/pkg/skaffold/util/tar_test.go b/pkg/skaffold/util/tar_test.go index 820b94e99ce..63bd2967675 100644 --- a/pkg/skaffold/util/tar_test.go +++ b/pkg/skaffold/util/tar_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import ( "github.com/GoogleContainerTools/skaffold/testutil" ) -func Test_addFileToTar(t *testing.T) { +func TestCreateTar(t *testing.T) { tmpDir, cleanup := testutil.NewTempDir(t) defer cleanup() @@ -35,41 +35,116 @@ func Test_addFileToTar(t *testing.T) { "bar/bat": "baz2", "bar/baz": "baz3", } - for p, c := range files { - tmpDir.Write(p, c) + var paths []string + for path, content := range files { + tmpDir.Write(path, content) + paths = append(paths, path) } - // Add all the files to a tar. + reset := testutil.Chdir(t, tmpDir.Root()) + defer reset() + var b bytes.Buffer - tw := tar.NewWriter(&b) - for p := range files { - path := tmpDir.Path(p) - if err := addFileToTar(path, p, tw); err != nil { - t.Fatalf("addFileToTar() error = %v", err) + err := CreateTar(&b, ".", paths) + testutil.CheckError(t, false, err) + + // Make sure the contents match. 
+ tarFiles := make(map[string]string) + tr := tar.NewReader(&b) + for { + hdr, err := tr.Next() + if err == io.EOF { + break } + testutil.CheckError(t, false, err) + + content, err := ioutil.ReadAll(tr) + testutil.CheckError(t, false, err) + + tarFiles[hdr.Name] = string(content) } - tw.Close() + + testutil.CheckErrorAndDeepEqual(t, false, err, files, tarFiles) +} + +func TestCreateTarSubDirectory(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() + + files := map[string]string{ + "sub/foo": "baz1", + "sub/bar/bat": "baz2", + "sub/bar/baz": "baz3", + } + var paths []string + for path, content := range files { + tmpDir.Write(path, content) + paths = append(paths, path) + } + + reset := testutil.Chdir(t, tmpDir.Root()) + defer reset() + + var b bytes.Buffer + err := CreateTar(&b, "sub", paths) + testutil.CheckError(t, false, err) // Make sure the contents match. + tarFiles := make(map[string]string) tr := tar.NewReader(&b) for { hdr, err := tr.Next() if err == io.EOF { break } - if err != nil { - t.Errorf("Error reading tar: %s", err) - } - expectedContents, ok := files[hdr.Name] - if !ok { - t.Errorf("Unexpected file in tar: %s", hdr.Name) - } - actualContents, err := ioutil.ReadAll(tr) - if err != nil { - t.Errorf("Error %s reading file %s from tar", err, hdr.Name) - } - if expectedContents != string(actualContents) { - t.Errorf("File contents don't match. 
%s != %s", actualContents, expectedContents) + testutil.CheckError(t, false, err) + + content, err := ioutil.ReadAll(tr) + testutil.CheckError(t, false, err) + + tarFiles["sub/"+hdr.Name] = string(content) + } + + testutil.CheckErrorAndDeepEqual(t, false, err, files, tarFiles) +} + +func TestCreateTarWithAbsolutePaths(t *testing.T) { + tmpDir, cleanup := testutil.NewTempDir(t) + defer cleanup() + + files := map[string]string{ + "foo": "baz1", + "bar/bat": "baz2", + "bar/baz": "baz3", + } + var paths []string + for path, content := range files { + tmpDir.Write(path, content) + paths = append(paths, tmpDir.Path(path)) + } + + reset := testutil.Chdir(t, tmpDir.Root()) + defer reset() + + var b bytes.Buffer + err := CreateTar(&b, tmpDir.Root(), paths) + testutil.CheckError(t, false, err) + + // Make sure the contents match. + tarFiles := make(map[string]string) + tr := tar.NewReader(&b) + for { + hdr, err := tr.Next() + if err == io.EOF { + break } + testutil.CheckError(t, false, err) + + content, err := ioutil.ReadAll(tr) + testutil.CheckError(t, false, err) + + tarFiles[hdr.Name] = string(content) } + + testutil.CheckErrorAndDeepEqual(t, false, err, files, tarFiles) } diff --git a/pkg/skaffold/util/util.go b/pkg/skaffold/util/util.go index 047ca0326ba..8d0618d8eb0 100644 --- a/pkg/skaffold/util/util.go +++ b/pkg/skaffold/util/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,6 +20,7 @@ import ( "bufio" "bytes" "crypto/rand" + "encoding/json" "fmt" "io/ioutil" "net/http" @@ -43,6 +44,15 @@ func RandomID() string { return fmt.Sprintf("%x", b) } +func RandomFourCharacterID() string { + b := make([]byte, 2) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x", b) +} + // These are the supported file formats for kubernetes manifests var validSuffixes = []string{".yml", ".yaml", ".json"} @@ -84,6 +94,9 @@ func ExpandPathsGlob(workingDir string, paths []string) ([]string, error) { if err != nil { return nil, errors.Wrap(err, "glob") } + if len(files) == 0 { + logrus.Warnf("%s did not match any file", p) + } for _, f := range files { err := filepath.Walk(f, func(path string, info os.FileInfo, err error) error { @@ -242,3 +255,28 @@ func NonEmptyLines(input []byte) []string { } return result } + +// CloneThroughJSON marshals the old interface into the new one +func CloneThroughJSON(old interface{}, new interface{}) error { + o, err := json.Marshal(old) + if err != nil { + return errors.Wrap(err, "marshalling old") + } + if err := json.Unmarshal(o, &new); err != nil { + return errors.Wrap(err, "unmarshalling new") + } + return nil +} + +// AbsolutePaths prepends each path in paths with workspace if the path isn't absolute +func AbsolutePaths(workspace string, paths []string) []string { + var p []string + for _, path := range paths { + // TODO(dgageot): this is only done for jib builder. + if !filepath.IsAbs(path) { + path = filepath.Join(workspace, path) + } + p = append(p, path) + } + return p +} diff --git a/pkg/skaffold/util/util_test.go b/pkg/skaffold/util/util_test.go index a87b2b1912f..bea54ba995c 100644 --- a/pkg/skaffold/util/util_test.go +++ b/pkg/skaffold/util/util_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,6 +20,7 @@ import ( "path/filepath" "testing" + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/testutil" ) @@ -208,3 +209,29 @@ func TestNonEmptyLines(t *testing.T) { }) } } + +func TestCloneThroughJSON(t *testing.T) { + tests := []struct { + name string + old interface{} + new interface{} + expected interface{} + }{ + { + name: "google cloud build", + old: map[string]string{ + "projectId": "unit-test", + }, + new: &latest.GoogleCloudBuild{}, + expected: &latest.GoogleCloudBuild{ + ProjectID: "unit-test", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := CloneThroughJSON(test.old, test.new) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expected, test.new) + }) + } +} diff --git a/pkg/skaffold/util/wrapper.go b/pkg/skaffold/util/wrapper.go index ef89a01f199..dbee98327cd 100644 --- a/pkg/skaffold/util/wrapper.go +++ b/pkg/skaffold/util/wrapper.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/wrapper_unix.go b/pkg/skaffold/util/wrapper_unix.go index f84c82e62b1..d7fcf290122 100644 --- a/pkg/skaffold/util/wrapper_unix.go +++ b/pkg/skaffold/util/wrapper_unix.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/util/wrapper_unix_test.go b/pkg/skaffold/util/wrapper_unix_test.go index 0df091264e4..5077550646a 100644 --- a/pkg/skaffold/util/wrapper_unix_test.go +++ b/pkg/skaffold/util/wrapper_unix_test.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/wrapper_windows.go b/pkg/skaffold/util/wrapper_windows.go index abdad6780bd..0fcfbb9f5cf 100644 --- a/pkg/skaffold/util/wrapper_windows.go +++ b/pkg/skaffold/util/wrapper_windows.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/util/wrapper_windows_test.go b/pkg/skaffold/util/wrapper_windows_test.go index 53d454a65aa..60f32bc2142 100644 --- a/pkg/skaffold/util/wrapper_windows_test.go +++ b/pkg/skaffold/util/wrapper_windows_test.go @@ -1,7 +1,7 @@ // +build windows /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/version/version.go b/pkg/skaffold/version/version.go index 6e9552be6c5..31d344ca010 100644 --- a/pkg/skaffold/version/version.go +++ b/pkg/skaffold/version/version.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/version/version_test.go b/pkg/skaffold/version/version_test.go index dd19cf7c14d..35918166557 100644 --- a/pkg/skaffold/version/version_test.go +++ b/pkg/skaffold/version/version_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/warnings/warnings.go b/pkg/skaffold/warnings/warnings.go new file mode 100644 index 00000000000..6ddad96453b --- /dev/null +++ b/pkg/skaffold/warnings/warnings.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package warnings + +import ( + "fmt" + "sort" + + "github.com/sirupsen/logrus" +) + +// Warner prints warnings +type Warner func(format string, args ...interface{}) + +// Printf can be overridden for testing +var Printf = logrus.Warnf + +// Collect is used for testing to collect warnings +// instead of printing them +type Collect struct { + Warnings []string +} + +// Warnf collects all the warnings for unit tests +func (l *Collect) Warnf(format string, args ...interface{}) { + l.Warnings = append(l.Warnings, fmt.Sprintf(format, args...)) + sort.Strings(l.Warnings) +} diff --git a/pkg/skaffold/watch/changes.go b/pkg/skaffold/watch/changes.go index 8d61ce9b589..a72fbf530d2 100644 --- a/pkg/skaffold/watch/changes.go +++ b/pkg/skaffold/watch/changes.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/watch/changes_test.go b/pkg/skaffold/watch/changes_test.go index 47c9f20cab0..4e1a012ba89 100644 --- a/pkg/skaffold/watch/changes_test.go +++ b/pkg/skaffold/watch/changes_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/skaffold/watch/triggers.go b/pkg/skaffold/watch/triggers.go index eeb5089bc6a..07c11e62bed 100644 --- a/pkg/skaffold/watch/triggers.go +++ b/pkg/skaffold/watch/triggers.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -44,11 +44,11 @@ func NewTrigger(opts *config.SkaffoldOptions) (Trigger, error) { switch strings.ToLower(opts.Trigger) { case "polling": return &pollTrigger{ - interval: time.Duration(opts.WatchPollInterval) * time.Millisecond, + Interval: time.Duration(opts.WatchPollInterval) * time.Millisecond, }, nil case "notify": return &fsNotifyTrigger{ - interval: time.Duration(opts.WatchPollInterval) * time.Millisecond, + Interval: time.Duration(opts.WatchPollInterval) * time.Millisecond, }, nil case "manual": return &manualTrigger{}, nil @@ -59,7 +59,7 @@ func NewTrigger(opts *config.SkaffoldOptions) (Trigger, error) { // pollTrigger watches for changes on a given interval of time. type pollTrigger struct { - interval time.Duration + Interval time.Duration } // Debounce tells the watcher to debounce rapid sequence of changes. @@ -68,14 +68,14 @@ func (t *pollTrigger) Debounce() bool { } func (t *pollTrigger) WatchForChanges(out io.Writer) { - color.Yellow.Fprintf(out, "Watching for changes every %v...\n", t.interval) + color.Yellow.Fprintf(out, "Watching for changes every %v...\n", t.Interval) } // Start starts a timer. func (t *pollTrigger) Start(ctx context.Context) (<-chan bool, error) { trigger := make(chan bool) - ticker := time.NewTicker(t.interval) + ticker := time.NewTicker(t.Interval) go func() { for { select { @@ -133,7 +133,7 @@ func (t *manualTrigger) Start(ctx context.Context) (<-chan bool, error) { // notifyTrigger watches for changes with fsnotify type fsNotifyTrigger struct { - interval time.Duration + Interval time.Duration } // Debounce tells the watcher to not debounce rapid sequence of changes. @@ -167,7 +167,7 @@ func (t *fsNotifyTrigger) Start(ctx context.Context) (<-chan bool, error) { // Wait t.interval before triggering. // This way, rapid stream of events will be grouped. 
- timer.Reset(t.interval) + timer.Reset(t.Interval) case <-timer.C: trigger <- true case <-ctx.Done(): diff --git a/pkg/skaffold/watch/triggers_test.go b/pkg/skaffold/watch/triggers_test.go new file mode 100644 index 00000000000..6c2436a04bc --- /dev/null +++ b/pkg/skaffold/watch/triggers_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "bytes" + "testing" + "time" + + "github.com/GoogleContainerTools/skaffold/pkg/skaffold/config" + "github.com/GoogleContainerTools/skaffold/testutil" +) + +func TestNewTrigger(t *testing.T) { + var tests = []struct { + description string + opts *config.SkaffoldOptions + expected Trigger + shouldErr bool + }{ + { + description: "polling trigger", + opts: &config.SkaffoldOptions{Trigger: "polling", WatchPollInterval: 1}, + expected: &pollTrigger{ + Interval: time.Duration(1) * time.Millisecond, + }, + }, + { + description: "notify trigger", + opts: &config.SkaffoldOptions{Trigger: "notify", WatchPollInterval: 1}, + expected: &fsNotifyTrigger{ + Interval: time.Duration(1) * time.Millisecond, + }, + }, + { + description: "manual trigger", + opts: &config.SkaffoldOptions{Trigger: "manual"}, + expected: &manualTrigger{}, + }, + { + description: "unknown trigger", + opts: &config.SkaffoldOptions{Trigger: "unknown"}, + shouldErr: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + got, err := NewTrigger(test.opts) + 
testutil.CheckErrorAndDeepEqual(t, test.shouldErr, err, test.expected, got) + }) + } +} + +func TestPollTrigger_Debounce(t *testing.T) { + trigger := &pollTrigger{} + got, want := trigger.Debounce(), true + testutil.CheckDeepEqual(t, want, got) +} + +func TestPollTrigger_WatchForChanges(t *testing.T) { + out := new(bytes.Buffer) + + trigger := &pollTrigger{Interval: 10} + trigger.WatchForChanges(out) + + got, want := out.String(), "Watching for changes every 10ns...\n" + testutil.CheckDeepEqual(t, want, got) +} + +func TestNotifyTrigger_Debounce(t *testing.T) { + trigger := &fsNotifyTrigger{} + got, want := trigger.Debounce(), false + testutil.CheckDeepEqual(t, want, got) +} + +func TestNotifyTrigger_WatchForChanges(t *testing.T) { + out := new(bytes.Buffer) + + trigger := &fsNotifyTrigger{Interval: 10} + trigger.WatchForChanges(out) + + got, want := out.String(), "Watching for changes...\n" + testutil.CheckDeepEqual(t, want, got) +} + +func TestManualTrigger_Debounce(t *testing.T) { + trigger := &manualTrigger{} + got, want := trigger.Debounce(), false + testutil.CheckDeepEqual(t, want, got) +} + +func TestManualTrigger_WatchForChanges(t *testing.T) { + out := new(bytes.Buffer) + + trigger := &manualTrigger{} + trigger.WatchForChanges(out) + + got, want := out.String(), "Press any key to rebuild/redeploy the changes\n" + testutil.CheckDeepEqual(t, want, got) +} diff --git a/pkg/skaffold/watch/watch.go b/pkg/skaffold/watch/watch.go index ebd3daf141e..4fe803992e8 100644 --- a/pkg/skaffold/watch/watch.go +++ b/pkg/skaffold/watch/watch.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/skaffold/watch/watch_test.go b/pkg/skaffold/watch/watch_test.go index 576b3cd141b..bf34e66f818 100644 --- a/pkg/skaffold/watch/watch_test.go +++ b/pkg/skaffold/watch/watch_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -72,7 +72,7 @@ func TestWatch(t *testing.T) { // Watch folder watcher := NewWatcher(&pollTrigger{ - interval: 10 * time.Millisecond, + Interval: 10 * time.Millisecond, }) err := watcher.Register(folder.List, folderChanged.call) testutil.CheckError(t, false, err) diff --git a/pkg/skaffold/yamltags/tags.go b/pkg/skaffold/yamltags/tags.go index 20822790397..2dcf26ca77f 100644 --- a/pkg/skaffold/yamltags/tags.go +++ b/pkg/skaffold/yamltags/tags.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ limitations under the License. 
package yamltags import ( - "errors" "fmt" "reflect" "strconv" @@ -56,7 +55,9 @@ func ProcessTags(yamltags string, val reflect.Value, parentStruct reflect.Value, var yt YamlTag switch tagParts[0] { case "required": - yt = &RequiredTag{} + yt = &RequiredTag{ + Field: field, + } case "default": yt = &DefaultTag{} case "oneOf": @@ -81,6 +82,7 @@ type YamlTag interface { } type RequiredTag struct { + Field reflect.StructField } func (rt *RequiredTag) Load(s []string) error { @@ -89,7 +91,10 @@ func (rt *RequiredTag) Load(s []string) error { func (rt *RequiredTag) Process(val reflect.Value) error { if isZeroValue(val) { - return errors.New("required value not set") + if tags, ok := rt.Field.Tag.Lookup("yaml"); ok { + return fmt.Errorf("required value not set: %s", strings.Split(tags, ",")[0]) + } + return fmt.Errorf("required value not set: %s", rt.Field.Name) } return nil } diff --git a/pkg/skaffold/yamltags/tags_test.go b/pkg/skaffold/yamltags/tags_test.go index 058761eda0c..1b2f4fd3f30 100644 --- a/pkg/skaffold/yamltags/tags_test.go +++ b/pkg/skaffold/yamltags/tags_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/constants/constants.go b/pkg/webhook/constants/constants.go index b7a2e2ba9e9..bdf953498bf 100644 --- a/pkg/webhook/constants/constants.go +++ b/pkg/webhook/constants/constants.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/webhook/gcs/gcs.go b/pkg/webhook/gcs/gcs.go index eeac0b24d9c..6594eec8705 100644 --- a/pkg/webhook/gcs/gcs.go +++ b/pkg/webhook/gcs/gcs.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/github/github.go b/pkg/webhook/github/github.go index be5e673a919..c3bdf19992b 100644 --- a/pkg/webhook/github/github.go +++ b/pkg/webhook/github/github.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/kubernetes/cleanup.go b/pkg/webhook/kubernetes/cleanup.go index c4dbeb6bf15..2ffe6bd3689 100644 --- a/pkg/webhook/kubernetes/cleanup.go +++ b/pkg/webhook/kubernetes/cleanup.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/kubernetes/deployment.go b/pkg/webhook/kubernetes/deployment.go index 6afb19de8f1..db84ce12a46 100644 --- a/pkg/webhook/kubernetes/deployment.go +++ b/pkg/webhook/kubernetes/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -139,6 +139,7 @@ func WaitForDeploymentToStabilize(d *appsv1.Deployment, ip string) error { if err != nil { return false, nil } + defer resp.Body.Close() return resp.StatusCode == http.StatusOK, nil }) } diff --git a/pkg/webhook/kubernetes/service.go b/pkg/webhook/kubernetes/service.go index d57bf9a8ec1..a7a54f2062d 100644 --- a/pkg/webhook/kubernetes/service.go +++ b/pkg/webhook/kubernetes/service.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/labels/labels.go b/pkg/webhook/labels/labels.go index f55ef40b4df..66051ca1c35 100644 --- a/pkg/webhook/labels/labels.go +++ b/pkg/webhook/labels/labels.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/labels/labels_test.go b/pkg/webhook/labels/labels_test.go new file mode 100644 index 00000000000..a5ec405345e --- /dev/null +++ b/pkg/webhook/labels/labels_test.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Skaffold Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labels + +import ( + "testing" + + "github.com/GoogleContainerTools/skaffold/pkg/webhook/constants" + "github.com/GoogleContainerTools/skaffold/testutil" + "github.com/google/go-github/github" +) + +func TestGenerateLabelsFromPR(t *testing.T) { + want := map[string]string{ + "docs-controller-deployment": "true", + "deployment": "docs-controller-deployment-1", + } + got := GenerateLabelsFromPR(1) + + testutil.CheckDeepEqual(t, want, got) +} + +func TestSelector(t *testing.T) { + want := "deployment=docs-controller-deployment-1" + got := Selector(1) + + testutil.CheckDeepEqual(t, want, got) +} + +func TestRetrieveLabel(t *testing.T) { + wantKey, wantValue := "deployment", "docs-controller-deployment-1" + gotKey, gotValue := RetrieveLabel(1) + + testutil.CheckDeepEqual(t, wantKey, gotKey) + testutil.CheckDeepEqual(t, wantValue, gotValue) +} + +func TestDocsLabelExists(t *testing.T) { + var tests = []struct { + description string + labels []*github.Label + expected bool + }{ + { + description: "labels are nil", + labels: nil, + expected: false, + }, + { + description: "labels are empty", + labels: []*github.Label{}, + expected: false, + }, + { + description: "doesn't contain the right label", + labels: []*github.Label{nil, {Name: github.String("test")}}, + expected: false, + }, + { + description: "contains the right label", + labels: []*github.Label{{Name: github.String("test")}, {Name: github.String(constants.DocsLabel)}}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + got := DocsLabelExists(test.labels) + + testutil.CheckDeepEqual(t, test.expected, got) + }) + } +} diff --git a/test.sh b/test.sh index 8eb1cd98628..e2506913465 100755 --- a/test.sh +++ b/test.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 The Skaffold Authors +# Copyright 2019 The Skaffold Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. diff --git a/testutil/cmd_helper.go b/testutil/cmd_helper.go index e2b14fdbe8a..a7c15c028e1 100644 --- a/testutil/cmd_helper.go +++ b/testutil/cmd_helper.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/testutil/fake_image_api.go b/testutil/fake_image_api.go index 8b1bc893438..874f4644c6e 100644 --- a/testutil/fake_image_api.go +++ b/testutil/fake_image_api.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ package testutil import ( "context" - "crypto/rand" + "crypto/sha256" "fmt" "io" "io/ioutil" @@ -37,7 +37,10 @@ type FakeAPIClient struct { ErrImageInspect bool ErrImageTag bool ErrImagePush bool + ErrImagePull bool ErrStream bool + + nextImageID int } type errReader struct{} @@ -61,7 +64,8 @@ func (f *FakeAPIClient) ImageBuild(_ context.Context, _ io.Reader, options types f.TagToImageID = make(map[string]string) } - imageID := "sha256:" + randomID() + f.nextImageID++ + imageID := fmt.Sprintf("sha256:%d", f.nextImageID) f.TagToImageID[imageID] = imageID for _, tag := range options.Tags { @@ -76,12 +80,6 @@ func (f *FakeAPIClient) ImageBuild(_ context.Context, _ io.Reader, options types }, nil } -func randomID() string { - b := make([]byte, 16) - rand.Read(b) - return fmt.Sprintf("%x", b) -} - func (f *FakeAPIClient) ImageInspectWithRaw(_ context.Context, ref string) (types.ImageInspect, []byte, error) { if f.ErrImageInspect { return types.ImageInspect{}, nil, fmt.Errorf("") @@ -115,11 +113,19 @@ func (f *FakeAPIClient) ImagePush(_ context.Context, ref string, _ types.ImagePu return nil, fmt.Errorf("") } - digest := f.TagToImageID[ref] + digest := fmt.Sprintf("sha256:%x", 
sha256.New().Sum([]byte(f.TagToImageID[ref]))) return f.body(digest), nil } +func (f *FakeAPIClient) ImagePull(_ context.Context, ref string, _ types.ImagePullOptions) (io.ReadCloser, error) { + if f.ErrImagePull { + return nil, fmt.Errorf("") + } + + return f.body(""), nil +} + func (f *FakeAPIClient) Info(context.Context) (types.Info, error) { return types.Info{ IndexServerAddress: registry.IndexServer, diff --git a/testutil/kubecontext.go b/testutil/kubecontext.go index 5cec145cb45..7c8e38c1678 100644 --- a/testutil/kubecontext.go +++ b/testutil/kubecontext.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/testutil/tmp.go b/testutil/tmp.go index a73c44de49a..61b5c59c6b6 100644 --- a/testutil/tmp.go +++ b/testutil/tmp.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/testutil/util.go b/testutil/util.go index f0721c27916..2b52387ecd4 100644 --- a/testutil/util.go +++ b/testutil/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -70,6 +70,27 @@ func CheckError(t *testing.T, shouldErr bool, err error) { } } +// Chdir changes current directory for a test +func Chdir(t *testing.T, dir string) func() { + t.Helper() + + pwd, err := os.Getwd() + if err != nil { + t.Fatal("unable to get current directory") + } + + err = os.Chdir(dir) + if err != nil { + t.Fatal("unable to change current directory") + } + + return func() { + if err := os.Chdir(pwd); err != nil { + t.Fatal("unable to reset current directory") + } + } +} + func checkErr(shouldErr bool, err error) error { if err == nil && shouldErr { return errors.New("expected error, but returned none") diff --git a/vendor/github.com/gdamore/encoding/LICENSE b/vendor/github.com/gdamore/encoding/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gdamore/encoding/ascii.go b/vendor/github.com/gdamore/encoding/ascii.go new file mode 100644 index 00000000000..b7321f43346 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/ascii.go @@ -0,0 +1,36 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ASCII represents the 7-bit US-ASCII scheme. It decodes directly to +// UTF-8 without change, as all ASCII values are legal UTF-8. +// Unicode values less than 128 (i.e. 7 bits) map 1:1 with ASCII. +// It encodes runes outside of that to 0x1A, the ASCII substitution character. 
+var ASCII encoding.Encoding + +func init() { + amap := make(map[byte]rune) + for i := 128; i <= 255; i++ { + amap[byte(i)] = RuneError + } + + cm := &Charmap{Map: amap} + cm.Init() + ASCII = cm +} diff --git a/vendor/github.com/gdamore/encoding/charmap.go b/vendor/github.com/gdamore/encoding/charmap.go new file mode 100644 index 00000000000..db1c33ef7ff --- /dev/null +++ b/vendor/github.com/gdamore/encoding/charmap.go @@ -0,0 +1,196 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "sync" + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/transform" +) + +const ( + // RuneError is an alias for the UTF-8 replacement rune, '\uFFFD'. + RuneError = '\uFFFD' + + // RuneSelf is the rune below which UTF-8 and the Unicode values are + // identical. Its also the limit for ASCII. + RuneSelf = 0x80 + + // ASCIISub is the ASCII substitution character. + ASCIISub = '\x1a' +) + +// Charmap is a structure for setting up encodings for 8-bit character sets, +// for transforming between UTF8 and that other character set. It has some +// ideas borrowed from golang.org/x/text/encoding/charmap, but it uses a +// different implementation. This implementation uses maps, and supports +// user-defined maps. 
+// +// We do assume that a character map has a reasonable substitution character, +// and that valid encodings are stable (exactly a 1:1 map) and stateless +// (that is there is no shift character or anything like that.) Hence this +// approach will not work for many East Asian character sets. +// +// Measurement shows little or no measurable difference in the performance of +// the two approaches. The difference was down to a couple of nsec/op, and +// no consistent pattern as to which ran faster. With the conversion to +// UTF-8 the code takes about 25 nsec/op. The conversion in the reverse +// direction takes about 100 nsec/op. (The larger cost for conversion +// from UTF-8 is most likely due to the need to convert the UTF-8 byte stream +// to a rune before conversion. +// +type Charmap struct { + transform.NopResetter + bytes map[rune]byte + runes [256][]byte + once sync.Once + + // The map between bytes and runes. To indicate that a specific + // byte value is invalid for a charcter set, use the rune + // utf8.RuneError. Values that are absent from this map will + // be assumed to have the identity mapping -- that is the default + // is to assume ISO8859-1, where all 8-bit characters have the same + // numeric value as their Unicode runes. (Not to be confused with + // the UTF-8 values, which *will* be different for non-ASCII runes.) + // + // If no values less than RuneSelf are changed (or have non-identity + // mappings), then the character set is assumed to be an ASCII + // superset, and certain assumptions and optimizations become + // available for ASCII bytes. + Map map[byte]rune + + // The ReplacementChar is the byte value to use for substitution. + // It should normally be ASCIISub for ASCII encodings. This may be + // unset (left to zero) for mappings that are strictly ASCII supersets. + // In that case ASCIISub will be assumed instead. 
+ ReplacementChar byte +} + +type cmapDecoder struct { + transform.NopResetter + runes [256][]byte +} + +type cmapEncoder struct { + transform.NopResetter + bytes map[rune]byte + replace byte +} + +// Init initializes internal values of a character map. This should +// be done early, to minimize the cost of allocation of transforms +// later. It is not strictly necessary however, as the allocation +// functions will arrange to call it if it has not already been done. +func (c *Charmap) Init() { + c.once.Do(c.initialize) +} + +func (c *Charmap) initialize() { + c.bytes = make(map[rune]byte) + ascii := true + + for i := 0; i < 256; i++ { + r, ok := c.Map[byte(i)] + if !ok { + r = rune(i) + } + if r < 128 && r != rune(i) { + ascii = false + } + if r != RuneError { + c.bytes[r] = byte(i) + } + utf := make([]byte, utf8.RuneLen(r)) + utf8.EncodeRune(utf, r) + c.runes[i] = utf + } + if ascii && c.ReplacementChar == '\x00' { + c.ReplacementChar = ASCIISub + } +} + +// NewDecoder returns a Decoder the converts from the 8-bit +// character set to UTF-8. Unknown mappings, if any, are mapped +// to '\uFFFD'. +func (c *Charmap) NewDecoder() *encoding.Decoder { + c.Init() + return &encoding.Decoder{Transformer: &cmapDecoder{runes: c.runes}} +} + +// NewEncoder returns a Transformer that converts from UTF8 to the +// 8-bit character set. Unknown mappings are mapped to 0x1A. 
+func (c *Charmap) NewEncoder() *encoding.Encoder { + c.Init() + return &encoding.Encoder{ + Transformer: &cmapEncoder{ + bytes: c.bytes, + replace: c.ReplacementChar, + }, + } +} + +func (d *cmapDecoder) Transform(dst, src []byte, atEOF bool) (int, int, error) { + var e error + var ndst, nsrc int + + for _, c := range src { + b := d.runes[c] + l := len(b) + + if ndst+l > len(dst) { + e = transform.ErrShortDst + break + } + for i := 0; i < l; i++ { + dst[ndst] = b[i] + ndst++ + } + nsrc++ + } + return ndst, nsrc, e +} + +func (d *cmapEncoder) Transform(dst, src []byte, atEOF bool) (int, int, error) { + var e error + var ndst, nsrc int + for nsrc < len(src) { + if ndst >= len(dst) { + e = transform.ErrShortDst + break + } + + r, sz := utf8.DecodeRune(src[nsrc:]) + if r == utf8.RuneError && sz == 1 { + // If its inconclusive due to insufficient data in + // in the source, report it + if !atEOF && !utf8.FullRune(src[nsrc:]) { + e = transform.ErrShortSrc + break + } + } + + if c, ok := d.bytes[r]; ok { + dst[ndst] = c + } else { + dst[ndst] = d.replace + } + nsrc += sz + ndst++ + } + + return ndst, nsrc, e +} diff --git a/vendor/github.com/gdamore/encoding/doc.go b/vendor/github.com/gdamore/encoding/doc.go new file mode 100644 index 00000000000..8a7b48d7ee0 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package encoding provides a few of the encoding structures that are +// missing from the Go x/text/encoding tree. +package encoding diff --git a/vendor/github.com/gdamore/encoding/ebcdic.go b/vendor/github.com/gdamore/encoding/ebcdic.go new file mode 100644 index 00000000000..8e13f1a97fd --- /dev/null +++ b/vendor/github.com/gdamore/encoding/ebcdic.go @@ -0,0 +1,273 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// EBCDIC represents the 8-bit EBCDIC scheme, found in some mainframe +// environments. If you don't know what this is, consider yourself lucky. 
+var EBCDIC encoding.Encoding + +func init() { + cm := &Charmap{ + ReplacementChar: '\x3f', + Map: map[byte]rune{ + // 0x00-0x03 match + 0x04: RuneError, + 0x05: '\t', + 0x06: RuneError, + 0x07: '\x7f', + 0x08: RuneError, + 0x09: RuneError, + 0x0a: RuneError, + // 0x0b-0x13 match + 0x14: RuneError, + 0x15: '\x85', // Not in any ISO code + 0x16: '\x08', + 0x17: RuneError, + // 0x18-0x19 match + 0x1a: RuneError, + 0x1b: RuneError, + // 0x1c-0x1f match + 0x20: RuneError, + 0x21: RuneError, + 0x22: RuneError, + 0x23: RuneError, + 0x24: RuneError, + 0x25: '\n', + 0x26: '\x17', + 0x27: '\x1b', + 0x28: RuneError, + 0x29: RuneError, + 0x2a: RuneError, + 0x2b: RuneError, + 0x2c: RuneError, + 0x2d: '\x05', + 0x2e: '\x06', + 0x2f: '\x07', + 0x30: RuneError, + 0x31: RuneError, + 0x32: '\x16', + 0x33: RuneError, + 0x34: RuneError, + 0x35: RuneError, + 0x36: RuneError, + 0x37: '\x04', + 0x38: RuneError, + 0x39: RuneError, + 0x3a: RuneError, + 0x3b: RuneError, + 0x3c: '\x14', + 0x3d: '\x15', + 0x3e: RuneError, + 0x3f: '\x1a', // also replacement char + 0x40: ' ', + 0x41: '\xa0', + 0x42: RuneError, + 0x43: RuneError, + 0x44: RuneError, + 0x45: RuneError, + 0x46: RuneError, + 0x47: RuneError, + 0x48: RuneError, + 0x49: RuneError, + 0x4a: RuneError, + 0x4b: '.', + 0x4c: '<', + 0x4d: '(', + 0x4e: '+', + 0x4f: '|', + 0x50: '&', + 0x51: RuneError, + 0x52: RuneError, + 0x53: RuneError, + 0x54: RuneError, + 0x55: RuneError, + 0x56: RuneError, + 0x57: RuneError, + 0x58: RuneError, + 0x59: RuneError, + 0x5a: '!', + 0x5b: '$', + 0x5c: '*', + 0x5d: ')', + 0x5e: ';', + 0x5f: '¬', + 0x60: '-', + 0x61: '/', + 0x62: RuneError, + 0x63: RuneError, + 0x64: RuneError, + 0x65: RuneError, + 0x66: RuneError, + 0x67: RuneError, + 0x68: RuneError, + 0x69: RuneError, + 0x6a: '¦', + 0x6b: ',', + 0x6c: '%', + 0x6d: '_', + 0x6e: '>', + 0x6f: '?', + 0x70: RuneError, + 0x71: RuneError, + 0x72: RuneError, + 0x73: RuneError, + 0x74: RuneError, + 0x75: RuneError, + 0x76: RuneError, + 0x77: RuneError, + 0x78: 
RuneError, + 0x79: '`', + 0x7a: ':', + 0x7b: '#', + 0x7c: '@', + 0x7d: '\'', + 0x7e: '=', + 0x7f: '"', + 0x80: RuneError, + 0x81: 'a', + 0x82: 'b', + 0x83: 'c', + 0x84: 'd', + 0x85: 'e', + 0x86: 'f', + 0x87: 'g', + 0x88: 'h', + 0x89: 'i', + 0x8a: RuneError, + 0x8b: RuneError, + 0x8c: RuneError, + 0x8d: RuneError, + 0x8e: RuneError, + 0x8f: '±', + 0x90: RuneError, + 0x91: 'j', + 0x92: 'k', + 0x93: 'l', + 0x94: 'm', + 0x95: 'n', + 0x96: 'o', + 0x97: 'p', + 0x98: 'q', + 0x99: 'r', + 0x9a: RuneError, + 0x9b: RuneError, + 0x9c: RuneError, + 0x9d: RuneError, + 0x9e: RuneError, + 0x9f: RuneError, + 0xa0: RuneError, + 0xa1: '~', + 0xa2: 's', + 0xa3: 't', + 0xa4: 'u', + 0xa5: 'v', + 0xa6: 'w', + 0xa7: 'x', + 0xa8: 'y', + 0xa9: 'z', + 0xaa: RuneError, + 0xab: RuneError, + 0xac: RuneError, + 0xad: RuneError, + 0xae: RuneError, + 0xaf: RuneError, + 0xb0: '^', + 0xb1: RuneError, + 0xb2: RuneError, + 0xb3: RuneError, + 0xb4: RuneError, + 0xb5: RuneError, + 0xb6: RuneError, + 0xb7: RuneError, + 0xb8: RuneError, + 0xb9: RuneError, + 0xba: '[', + 0xbb: ']', + 0xbc: RuneError, + 0xbd: RuneError, + 0xbe: RuneError, + 0xbf: RuneError, + 0xc0: '{', + 0xc1: 'A', + 0xc2: 'B', + 0xc3: 'C', + 0xc4: 'D', + 0xc5: 'E', + 0xc6: 'F', + 0xc7: 'G', + 0xc8: 'H', + 0xc9: 'I', + 0xca: '\xad', // NB: soft hyphen + 0xcb: RuneError, + 0xcc: RuneError, + 0xcd: RuneError, + 0xce: RuneError, + 0xcf: RuneError, + 0xd0: '}', + 0xd1: 'J', + 0xd2: 'K', + 0xd3: 'L', + 0xd4: 'M', + 0xd5: 'N', + 0xd6: 'O', + 0xd7: 'P', + 0xd8: 'Q', + 0xd9: 'R', + 0xda: RuneError, + 0xdb: RuneError, + 0xdc: RuneError, + 0xdd: RuneError, + 0xde: RuneError, + 0xdf: RuneError, + 0xe0: '\\', + 0xe1: '\u2007', // Non-breaking space + 0xe2: 'S', + 0xe3: 'T', + 0xe4: 'U', + 0xe5: 'V', + 0xe6: 'W', + 0xe7: 'X', + 0xe8: 'Y', + 0xe9: 'Z', + 0xea: RuneError, + 0xeb: RuneError, + 0xec: RuneError, + 0xed: RuneError, + 0xee: RuneError, + 0xef: RuneError, + 0xf0: '0', + 0xf1: '1', + 0xf2: '2', + 0xf3: '3', + 0xf4: '4', + 0xf5: '5', + 0xf6: '6', 
+ 0xf7: '7', + 0xf8: '8', + 0xf9: '9', + 0xfa: RuneError, + 0xfb: RuneError, + 0xfc: RuneError, + 0xfd: RuneError, + 0xfe: RuneError, + 0xff: RuneError, + }} + cm.Init() + EBCDIC = cm +} diff --git a/vendor/github.com/gdamore/encoding/latin1.go b/vendor/github.com/gdamore/encoding/latin1.go new file mode 100644 index 00000000000..226bf01d0f8 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/latin1.go @@ -0,0 +1,33 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ISO8859_1 represents the 8-bit ISO8859-1 scheme. It decodes directly to +// UTF-8 without change, as all ISO8859-1 values are legal UTF-8. +// Unicode values less than 256 (i.e. 8 bits) map 1:1 with 8859-1. +// It encodes runes outside of that to 0x1A, the ASCII substitution character. +var ISO8859_1 encoding.Encoding + +func init() { + cm := &Charmap{} + cm.Init() + + // 8859-1 is the 8-bit identity map for Unicode. + ISO8859_1 = cm +} diff --git a/vendor/github.com/gdamore/encoding/latin5.go b/vendor/github.com/gdamore/encoding/latin5.go new file mode 100644 index 00000000000..c75ecf27534 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/latin5.go @@ -0,0 +1,35 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ISO8859_9 represents the 8-bit ISO8859-9 scheme. +var ISO8859_9 encoding.Encoding + +func init() { + cm := &Charmap{Map: map[byte]rune{ + 0xD0: 'Ğ', + 0xDD: 'İ', + 0xDE: 'Ş', + 0xF0: 'ğ', + 0xFD: 'ı', + 0xFE: 'ş', + }} + cm.Init() + ISO8859_9 = cm +} diff --git a/vendor/github.com/gdamore/encoding/utf8.go b/vendor/github.com/gdamore/encoding/utf8.go new file mode 100644 index 00000000000..2d59f4b39dd --- /dev/null +++ b/vendor/github.com/gdamore/encoding/utf8.go @@ -0,0 +1,35 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +type validUtf8 struct{} + +// UTF8 is an encoding for UTF-8. All it does is verify that the UTF-8 +// in is valid. The main reason for its existence is that it will detect +// and report ErrSrcShort or ErrDstShort, whereas the Nop encoding just +// passes every byte, blithely. 
+var UTF8 encoding.Encoding = validUtf8{} + +func (validUtf8) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: encoding.UTF8Validator} +} + +func (validUtf8) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: encoding.UTF8Validator} +} diff --git a/vendor/github.com/gdamore/tcell/AUTHORS b/vendor/github.com/gdamore/tcell/AUTHORS new file mode 100644 index 00000000000..53f87ee6391 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/AUTHORS @@ -0,0 +1,4 @@ +Garrett D'Amore +Zachary Yedidia +Junegunn Choi +Staysail Systems, Inc. diff --git a/vendor/github.com/gdamore/tcell/LICENSE b/vendor/github.com/gdamore/tcell/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gdamore/tcell/attr.go b/vendor/github.com/gdamore/tcell/attr.go new file mode 100644 index 00000000000..866c0ebd33d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/attr.go @@ -0,0 +1,32 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// AttrMask represents a mask of text attributes, apart from color. +// Note that support for attributes may vary widely across terminals. +type AttrMask int + +// Attributes are not colors, but affect the display of text. They can +// be combined. +const ( + AttrBold AttrMask = 1 << (25 + iota) + AttrBlink + AttrReverse + AttrUnderline + AttrDim + AttrNone AttrMask = 0 // Just normal text. 
+) + +const attrAll = AttrBold | AttrBlink | AttrReverse | AttrUnderline | AttrDim diff --git a/vendor/github.com/gdamore/tcell/cell.go b/vendor/github.com/gdamore/tcell/cell.go new file mode 100644 index 00000000000..957b62f7db4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/cell.go @@ -0,0 +1,191 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "github.com/mattn/go-runewidth" +) + +type cell struct { + currMain rune + currComb []rune + currStyle Style + lastMain rune + lastStyle Style + lastComb []rune + width int +} + +// CellBuffer represents a two dimensional array of character cells. +// This is primarily intended for use by Screen implementors; it +// contains much of the common code they need. To create one, just +// declare a variable of its type; no explicit initialization is necessary. +// +// CellBuffer is not thread safe. +type CellBuffer struct { + w int + h int + cells []cell +} + +// SetContent sets the contents (primary rune, combining runes, +// and style) for a cell at a given location. +func (cb *CellBuffer) SetContent(x int, y int, + mainc rune, combc []rune, style Style) { + + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + + c.currComb = append([]rune{}, combc...) 
+ i := 0 + for i < len(c.currComb) { + r := c.currComb[i] + if r == '\u200d' { + i += 2 + continue + } + if runewidth.RuneWidth(r) != 0 { + // not a combining character, yank it + c.currComb = append(c.currComb[:i-1], c.currComb[i+1:]...) + continue + } + i++ + } + + if c.currMain != mainc { + c.width = runewidth.RuneWidth(mainc) + } + c.currMain = mainc + c.currStyle = style + } +} + +// GetContent returns the contents of a character cell, including the +// primary rune, any combining character runes (which will usually be +// nil), the style, and the display width in cells. (The width can be +// either 1, normally, or 2 for East Asian full-width characters.) +func (cb *CellBuffer) GetContent(x, y int) (rune, []rune, Style, int) { + var mainc rune + var combc []rune + var style Style + var width int + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + mainc, combc, style = c.currMain, c.currComb, c.currStyle + if width = c.width; width == 0 || mainc < ' ' { + width = 1 + mainc = ' ' + } + } + return mainc, combc, style, width +} + +// Size returns the (width, height) in cells of the buffer. +func (cb *CellBuffer) Size() (int, int) { + return cb.w, cb.h +} + +// Invalidate marks all characters within the buffer as dirty. +func (cb *CellBuffer) Invalidate() { + for i := range cb.cells { + cb.cells[i].lastMain = rune(0) + } +} + +// Dirty checks if a character at the given location needs an +// to be refreshed on the physical display. This returns true +// if the cell content is different since the last time it was +// marked clean. 
+func (cb *CellBuffer) Dirty(x, y int) bool { + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + if c.lastMain == rune(0) { + return true + } + if c.lastMain != c.currMain { + return true + } + if c.lastStyle != c.currStyle { + return true + } + if len(c.lastComb) != len(c.currComb) { + return true + } + for i := range c.lastComb { + if c.lastComb[i] != c.currComb[i] { + return true + } + } + } + return false +} + +// SetDirty is normally used to indicate that a cell has +// been displayed (in which case dirty is false), or to manually +// force a cell to be marked dirty. +func (cb *CellBuffer) SetDirty(x, y int, dirty bool) { + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + if dirty { + c.lastMain = rune(0) + } else { + if c.currMain == rune(0) { + c.currMain = ' ' + } + c.lastMain = c.currMain + c.lastComb = c.currComb + c.lastStyle = c.currStyle + } + } +} + +// Resize is used to resize the cells array, with different dimensions, +// while preserving the original contents. The cells will be invalidated +// so that they can be redrawn. +func (cb *CellBuffer) Resize(w, h int) { + + if cb.h == h && cb.w == w { + return + } + + newc := make([]cell, w*h) + for y := 0; y < h && y < cb.h; y++ { + for x := 0; x < w && x < cb.w; x++ { + oc := &cb.cells[(y*cb.w)+x] + nc := &newc[(y*w)+x] + nc.currMain = oc.currMain + nc.currComb = oc.currComb + nc.currStyle = oc.currStyle + nc.width = oc.width + nc.lastMain = rune(0) + } + } + cb.cells = newc + cb.h = h + cb.w = w +} + +// Fill fills the entire cell buffer array with the specified character +// and style. Normally choose ' ' to clear the screen. This API doesn't +// support combining characters, or characters with a width larger than one. 
+func (cb *CellBuffer) Fill(r rune, style Style) { + for i := range cb.cells { + c := &cb.cells[i] + c.currMain = r + c.currComb = nil + c.currStyle = style + c.width = 1 + } +} diff --git a/vendor/github.com/gdamore/tcell/charset_stub.go b/vendor/github.com/gdamore/tcell/charset_stub.go new file mode 100644 index 00000000000..c1c1594c74f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/charset_stub.go @@ -0,0 +1,21 @@ +// +build plan9 nacl + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +func getCharset() string { + return "" +} diff --git a/vendor/github.com/gdamore/tcell/charset_unix.go b/vendor/github.com/gdamore/tcell/charset_unix.go new file mode 100644 index 00000000000..d9f9d8e1fe0 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/charset_unix.go @@ -0,0 +1,49 @@ +// +build !windows,!nacl,!plan9 + +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "os" + "strings" +) + +func getCharset() string { + // Determine the character set. This can help us later. + // Per POSIX, we search for LC_ALL first, then LC_CTYPE, and + // finally LANG. First one set wins. + locale := "" + if locale = os.Getenv("LC_ALL"); locale == "" { + if locale = os.Getenv("LC_CTYPE"); locale == "" { + locale = os.Getenv("LANG") + } + } + if locale == "POSIX" || locale == "C" { + return "US-ASCII" + } + if i := strings.IndexRune(locale, '@'); i >= 0 { + locale = locale[:i] + } + if i := strings.IndexRune(locale, '.'); i >= 0 { + locale = locale[i+1:] + } else { + // Default assumption, and on Linux we can see LC_ALL + // without a character set, which we assume implies UTF-8. + return "UTF-8" + } + // XXX: add support for aliases + return locale +} diff --git a/vendor/github.com/gdamore/tcell/charset_windows.go b/vendor/github.com/gdamore/tcell/charset_windows.go new file mode 100644 index 00000000000..2400aa8a3f0 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/charset_windows.go @@ -0,0 +1,21 @@ +// +build windows + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +func getCharset() string { + return "UTF-16" +} diff --git a/vendor/github.com/gdamore/tcell/color.go b/vendor/github.com/gdamore/tcell/color.go new file mode 100644 index 00000000000..2e096c705a0 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/color.go @@ -0,0 +1,1019 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import "strconv" + +// Color represents a color. The low numeric values are the same as used +// by ECMA-48, and beyond that XTerm. A 24-bit RGB value may be used by +// adding in the ColorIsRGB flag. For Color names we use the W3C approved +// color names. +// +// Note that on various terminals colors may be approximated however, or +// not supported at all. If no suitable representation for a color is known, +// the library will simply not set any color, deferring to whatever default +// attributes the terminal uses. +type Color int32 + +const ( + // ColorDefault is used to leave the Color unchanged from whatever + // system or teminal default may exist. + ColorDefault Color = -1 + + // ColorIsRGB is used to indicate that the numeric value is not + // a known color constant, but rather an RGB value. The lower + // order 3 bytes are RGB. + ColorIsRGB Color = 1 << 24 +) + +// Note that the order of these options is important -- it follows the +// definitions used by ECMA and XTerm. Hence any further named colors +// must begin at a value not less than 256. 
+const ( + ColorBlack Color = iota + ColorMaroon + ColorGreen + ColorOlive + ColorNavy + ColorPurple + ColorTeal + ColorSilver + ColorGray + ColorRed + ColorLime + ColorYellow + ColorBlue + ColorFuchsia + ColorAqua + ColorWhite + Color16 + Color17 + Color18 + Color19 + Color20 + Color21 + Color22 + Color23 + Color24 + Color25 + Color26 + Color27 + Color28 + Color29 + Color30 + Color31 + Color32 + Color33 + Color34 + Color35 + Color36 + Color37 + Color38 + Color39 + Color40 + Color41 + Color42 + Color43 + Color44 + Color45 + Color46 + Color47 + Color48 + Color49 + Color50 + Color51 + Color52 + Color53 + Color54 + Color55 + Color56 + Color57 + Color58 + Color59 + Color60 + Color61 + Color62 + Color63 + Color64 + Color65 + Color66 + Color67 + Color68 + Color69 + Color70 + Color71 + Color72 + Color73 + Color74 + Color75 + Color76 + Color77 + Color78 + Color79 + Color80 + Color81 + Color82 + Color83 + Color84 + Color85 + Color86 + Color87 + Color88 + Color89 + Color90 + Color91 + Color92 + Color93 + Color94 + Color95 + Color96 + Color97 + Color98 + Color99 + Color100 + Color101 + Color102 + Color103 + Color104 + Color105 + Color106 + Color107 + Color108 + Color109 + Color110 + Color111 + Color112 + Color113 + Color114 + Color115 + Color116 + Color117 + Color118 + Color119 + Color120 + Color121 + Color122 + Color123 + Color124 + Color125 + Color126 + Color127 + Color128 + Color129 + Color130 + Color131 + Color132 + Color133 + Color134 + Color135 + Color136 + Color137 + Color138 + Color139 + Color140 + Color141 + Color142 + Color143 + Color144 + Color145 + Color146 + Color147 + Color148 + Color149 + Color150 + Color151 + Color152 + Color153 + Color154 + Color155 + Color156 + Color157 + Color158 + Color159 + Color160 + Color161 + Color162 + Color163 + Color164 + Color165 + Color166 + Color167 + Color168 + Color169 + Color170 + Color171 + Color172 + Color173 + Color174 + Color175 + Color176 + Color177 + Color178 + Color179 + Color180 + Color181 + Color182 + Color183 + 
Color184 + Color185 + Color186 + Color187 + Color188 + Color189 + Color190 + Color191 + Color192 + Color193 + Color194 + Color195 + Color196 + Color197 + Color198 + Color199 + Color200 + Color201 + Color202 + Color203 + Color204 + Color205 + Color206 + Color207 + Color208 + Color209 + Color210 + Color211 + Color212 + Color213 + Color214 + Color215 + Color216 + Color217 + Color218 + Color219 + Color220 + Color221 + Color222 + Color223 + Color224 + Color225 + Color226 + Color227 + Color228 + Color229 + Color230 + Color231 + Color232 + Color233 + Color234 + Color235 + Color236 + Color237 + Color238 + Color239 + Color240 + Color241 + Color242 + Color243 + Color244 + Color245 + Color246 + Color247 + Color248 + Color249 + Color250 + Color251 + Color252 + Color253 + Color254 + Color255 + ColorAliceBlue + ColorAntiqueWhite + ColorAquaMarine + ColorAzure + ColorBeige + ColorBisque + ColorBlanchedAlmond + ColorBlueViolet + ColorBrown + ColorBurlyWood + ColorCadetBlue + ColorChartreuse + ColorChocolate + ColorCoral + ColorCornflowerBlue + ColorCornsilk + ColorCrimson + ColorDarkBlue + ColorDarkCyan + ColorDarkGoldenrod + ColorDarkGray + ColorDarkGreen + ColorDarkKhaki + ColorDarkMagenta + ColorDarkOliveGreen + ColorDarkOrange + ColorDarkOrchid + ColorDarkRed + ColorDarkSalmon + ColorDarkSeaGreen + ColorDarkSlateBlue + ColorDarkSlateGray + ColorDarkTurquoise + ColorDarkViolet + ColorDeepPink + ColorDeepSkyBlue + ColorDimGray + ColorDodgerBlue + ColorFireBrick + ColorFloralWhite + ColorForestGreen + ColorGainsboro + ColorGhostWhite + ColorGold + ColorGoldenrod + ColorGreenYellow + ColorHoneydew + ColorHotPink + ColorIndianRed + ColorIndigo + ColorIvory + ColorKhaki + ColorLavender + ColorLavenderBlush + ColorLawnGreen + ColorLemonChiffon + ColorLightBlue + ColorLightCoral + ColorLightCyan + ColorLightGoldenrodYellow + ColorLightGray + ColorLightGreen + ColorLightPink + ColorLightSalmon + ColorLightSeaGreen + ColorLightSkyBlue + ColorLightSlateGray + ColorLightSteelBlue + 
ColorLightYellow + ColorLimeGreen + ColorLinen + ColorMediumAquamarine + ColorMediumBlue + ColorMediumOrchid + ColorMediumPurple + ColorMediumSeaGreen + ColorMediumSlateBlue + ColorMediumSpringGreen + ColorMediumTurquoise + ColorMediumVioletRed + ColorMidnightBlue + ColorMintCream + ColorMistyRose + ColorMoccasin + ColorNavajoWhite + ColorOldLace + ColorOliveDrab + ColorOrange + ColorOrangeRed + ColorOrchid + ColorPaleGoldenrod + ColorPaleGreen + ColorPaleTurquoise + ColorPaleVioletRed + ColorPapayaWhip + ColorPeachPuff + ColorPeru + ColorPink + ColorPlum + ColorPowderBlue + ColorRebeccaPurple + ColorRosyBrown + ColorRoyalBlue + ColorSaddleBrown + ColorSalmon + ColorSandyBrown + ColorSeaGreen + ColorSeashell + ColorSienna + ColorSkyblue + ColorSlateBlue + ColorSlateGray + ColorSnow + ColorSpringGreen + ColorSteelBlue + ColorTan + ColorThistle + ColorTomato + ColorTurquoise + ColorViolet + ColorWheat + ColorWhiteSmoke + ColorYellowGreen +) + +// These are aliases for the color gray, because some of us spell +// it as grey. +const ( + ColorGrey = ColorGray + ColorDimGrey = ColorDimGray + ColorDarkGrey = ColorDarkGray + ColorDarkSlateGrey = ColorDarkSlateGray + ColorLightGrey = ColorLightGray + ColorLightSlateGrey = ColorLightSlateGray + ColorSlateGrey = ColorSlateGray +) + +// ColorValues maps color constants to their RGB values. 
+var ColorValues = map[Color]int32{ + ColorBlack: 0x000000, + ColorMaroon: 0x800000, + ColorGreen: 0x008000, + ColorOlive: 0x808000, + ColorNavy: 0x000080, + ColorPurple: 0x800080, + ColorTeal: 0x008080, + ColorSilver: 0xC0C0C0, + ColorGray: 0x808080, + ColorRed: 0xFF0000, + ColorLime: 0x00FF00, + ColorYellow: 0xFFFF00, + ColorBlue: 0x0000FF, + ColorFuchsia: 0xFF00FF, + ColorAqua: 0x00FFFF, + ColorWhite: 0xFFFFFF, + Color16: 0x000000, // black + Color17: 0x00005F, + Color18: 0x000087, + Color19: 0x0000AF, + Color20: 0x0000D7, + Color21: 0x0000FF, // blue + Color22: 0x005F00, + Color23: 0x005F5F, + Color24: 0x005F87, + Color25: 0x005FAF, + Color26: 0x005FD7, + Color27: 0x005FFF, + Color28: 0x008700, + Color29: 0x00875F, + Color30: 0x008787, + Color31: 0x0087Af, + Color32: 0x0087D7, + Color33: 0x0087FF, + Color34: 0x00AF00, + Color35: 0x00AF5F, + Color36: 0x00AF87, + Color37: 0x00AFAF, + Color38: 0x00AFD7, + Color39: 0x00AFFF, + Color40: 0x00D700, + Color41: 0x00D75F, + Color42: 0x00D787, + Color43: 0x00D7AF, + Color44: 0x00D7D7, + Color45: 0x00D7FF, + Color46: 0x00FF00, // lime + Color47: 0x00FF5F, + Color48: 0x00FF87, + Color49: 0x00FFAF, + Color50: 0x00FFd7, + Color51: 0x00FFFF, // aqua + Color52: 0x5F0000, + Color53: 0x5F005F, + Color54: 0x5F0087, + Color55: 0x5F00AF, + Color56: 0x5F00D7, + Color57: 0x5F00FF, + Color58: 0x5F5F00, + Color59: 0x5F5F5F, + Color60: 0x5F5F87, + Color61: 0x5F5FAF, + Color62: 0x5F5FD7, + Color63: 0x5F5FFF, + Color64: 0x5F8700, + Color65: 0x5F875F, + Color66: 0x5F8787, + Color67: 0x5F87AF, + Color68: 0x5F87D7, + Color69: 0x5F87FF, + Color70: 0x5FAF00, + Color71: 0x5FAF5F, + Color72: 0x5FAF87, + Color73: 0x5FAFAF, + Color74: 0x5FAFD7, + Color75: 0x5FAFFF, + Color76: 0x5FD700, + Color77: 0x5FD75F, + Color78: 0x5FD787, + Color79: 0x5FD7AF, + Color80: 0x5FD7D7, + Color81: 0x5FD7FF, + Color82: 0x5FFF00, + Color83: 0x5FFF5F, + Color84: 0x5FFF87, + Color85: 0x5FFFAF, + Color86: 0x5FFFD7, + Color87: 0x5FFFFF, + Color88: 0x870000, + Color89: 
0x87005F, + Color90: 0x870087, + Color91: 0x8700AF, + Color92: 0x8700D7, + Color93: 0x8700FF, + Color94: 0x875F00, + Color95: 0x875F5F, + Color96: 0x875F87, + Color97: 0x875FAF, + Color98: 0x875FD7, + Color99: 0x875FFF, + Color100: 0x878700, + Color101: 0x87875F, + Color102: 0x878787, + Color103: 0x8787AF, + Color104: 0x8787D7, + Color105: 0x8787FF, + Color106: 0x87AF00, + Color107: 0x87AF5F, + Color108: 0x87AF87, + Color109: 0x87AFAF, + Color110: 0x87AFD7, + Color111: 0x87AFFF, + Color112: 0x87D700, + Color113: 0x87D75F, + Color114: 0x87D787, + Color115: 0x87D7AF, + Color116: 0x87D7D7, + Color117: 0x87D7FF, + Color118: 0x87FF00, + Color119: 0x87FF5F, + Color120: 0x87FF87, + Color121: 0x87FFAF, + Color122: 0x87FFD7, + Color123: 0x87FFFF, + Color124: 0xAF0000, + Color125: 0xAF005F, + Color126: 0xAF0087, + Color127: 0xAF00AF, + Color128: 0xAF00D7, + Color129: 0xAF00FF, + Color130: 0xAF5F00, + Color131: 0xAF5F5F, + Color132: 0xAF5F87, + Color133: 0xAF5FAF, + Color134: 0xAF5FD7, + Color135: 0xAF5FFF, + Color136: 0xAF8700, + Color137: 0xAF875F, + Color138: 0xAF8787, + Color139: 0xAF87AF, + Color140: 0xAF87D7, + Color141: 0xAF87FF, + Color142: 0xAFAF00, + Color143: 0xAFAF5F, + Color144: 0xAFAF87, + Color145: 0xAFAFAF, + Color146: 0xAFAFD7, + Color147: 0xAFAFFF, + Color148: 0xAFD700, + Color149: 0xAFD75F, + Color150: 0xAFD787, + Color151: 0xAFD7AF, + Color152: 0xAFD7D7, + Color153: 0xAFD7FF, + Color154: 0xAFFF00, + Color155: 0xAFFF5F, + Color156: 0xAFFF87, + Color157: 0xAFFFAF, + Color158: 0xAFFFD7, + Color159: 0xAFFFFF, + Color160: 0xD70000, + Color161: 0xD7005F, + Color162: 0xD70087, + Color163: 0xD700AF, + Color164: 0xD700D7, + Color165: 0xD700FF, + Color166: 0xD75F00, + Color167: 0xD75F5F, + Color168: 0xD75F87, + Color169: 0xD75FAF, + Color170: 0xD75FD7, + Color171: 0xD75FFF, + Color172: 0xD78700, + Color173: 0xD7875F, + Color174: 0xD78787, + Color175: 0xD787AF, + Color176: 0xD787D7, + Color177: 0xD787FF, + Color178: 0xD7AF00, + Color179: 0xD7AF5F, + Color180: 
0xD7AF87, + Color181: 0xD7AFAF, + Color182: 0xD7AFD7, + Color183: 0xD7AFFF, + Color184: 0xD7D700, + Color185: 0xD7D75F, + Color186: 0xD7D787, + Color187: 0xD7D7AF, + Color188: 0xD7D7D7, + Color189: 0xD7D7FF, + Color190: 0xD7FF00, + Color191: 0xD7FF5F, + Color192: 0xD7FF87, + Color193: 0xD7FFAF, + Color194: 0xD7FFD7, + Color195: 0xD7FFFF, + Color196: 0xFF0000, // red + Color197: 0xFF005F, + Color198: 0xFF0087, + Color199: 0xFF00AF, + Color200: 0xFF00D7, + Color201: 0xFF00FF, // fuchsia + Color202: 0xFF5F00, + Color203: 0xFF5F5F, + Color204: 0xFF5F87, + Color205: 0xFF5FAF, + Color206: 0xFF5FD7, + Color207: 0xFF5FFF, + Color208: 0xFF8700, + Color209: 0xFF875F, + Color210: 0xFF8787, + Color211: 0xFF87AF, + Color212: 0xFF87D7, + Color213: 0xFF87FF, + Color214: 0xFFAF00, + Color215: 0xFFAF5F, + Color216: 0xFFAF87, + Color217: 0xFFAFAF, + Color218: 0xFFAFD7, + Color219: 0xFFAFFF, + Color220: 0xFFD700, + Color221: 0xFFD75F, + Color222: 0xFFD787, + Color223: 0xFFD7AF, + Color224: 0xFFD7D7, + Color225: 0xFFD7FF, + Color226: 0xFFFF00, // yellow + Color227: 0xFFFF5F, + Color228: 0xFFFF87, + Color229: 0xFFFFAF, + Color230: 0xFFFFD7, + Color231: 0xFFFFFF, // white + Color232: 0x080808, + Color233: 0x121212, + Color234: 0x1C1C1C, + Color235: 0x262626, + Color236: 0x303030, + Color237: 0x3A3A3A, + Color238: 0x444444, + Color239: 0x4E4E4E, + Color240: 0x585858, + Color241: 0x626262, + Color242: 0x6C6C6C, + Color243: 0x767676, + Color244: 0x808080, // grey + Color245: 0x8A8A8A, + Color246: 0x949494, + Color247: 0x9E9E9E, + Color248: 0xA8A8A8, + Color249: 0xB2B2B2, + Color250: 0xBCBCBC, + Color251: 0xC6C6C6, + Color252: 0xD0D0D0, + Color253: 0xDADADA, + Color254: 0xE4E4E4, + Color255: 0xEEEEEE, + ColorAliceBlue: 0xF0F8FF, + ColorAntiqueWhite: 0xFAEBD7, + ColorAquaMarine: 0x7FFFD4, + ColorAzure: 0xF0FFFF, + ColorBeige: 0xF5F5DC, + ColorBisque: 0xFFE4C4, + ColorBlanchedAlmond: 0xFFEBCD, + ColorBlueViolet: 0x8A2BE2, + ColorBrown: 0xA52A2A, + ColorBurlyWood: 0xDEB887, + ColorCadetBlue: 
0x5F9EA0, + ColorChartreuse: 0x7FFF00, + ColorChocolate: 0xD2691E, + ColorCoral: 0xFF7F50, + ColorCornflowerBlue: 0x6495ED, + ColorCornsilk: 0xFFF8DC, + ColorCrimson: 0xDC143C, + ColorDarkBlue: 0x00008B, + ColorDarkCyan: 0x008B8B, + ColorDarkGoldenrod: 0xB8860B, + ColorDarkGray: 0xA9A9A9, + ColorDarkGreen: 0x006400, + ColorDarkKhaki: 0xBDB76B, + ColorDarkMagenta: 0x8B008B, + ColorDarkOliveGreen: 0x556B2F, + ColorDarkOrange: 0xFF8C00, + ColorDarkOrchid: 0x9932CC, + ColorDarkRed: 0x8B0000, + ColorDarkSalmon: 0xE9967A, + ColorDarkSeaGreen: 0x8FBC8F, + ColorDarkSlateBlue: 0x483D8B, + ColorDarkSlateGray: 0x2F4F4F, + ColorDarkTurquoise: 0x00CED1, + ColorDarkViolet: 0x9400D3, + ColorDeepPink: 0xFF1493, + ColorDeepSkyBlue: 0x00BFFF, + ColorDimGray: 0x696969, + ColorDodgerBlue: 0x1E90FF, + ColorFireBrick: 0xB22222, + ColorFloralWhite: 0xFFFAF0, + ColorForestGreen: 0x228B22, + ColorGainsboro: 0xDCDCDC, + ColorGhostWhite: 0xF8F8FF, + ColorGold: 0xFFD700, + ColorGoldenrod: 0xDAA520, + ColorGreenYellow: 0xADFF2F, + ColorHoneydew: 0xF0FFF0, + ColorHotPink: 0xFF69B4, + ColorIndianRed: 0xCD5C5C, + ColorIndigo: 0x4B0082, + ColorIvory: 0xFFFFF0, + ColorKhaki: 0xF0E68C, + ColorLavender: 0xE6E6FA, + ColorLavenderBlush: 0xFFF0F5, + ColorLawnGreen: 0x7CFC00, + ColorLemonChiffon: 0xFFFACD, + ColorLightBlue: 0xADD8E6, + ColorLightCoral: 0xF08080, + ColorLightCyan: 0xE0FFFF, + ColorLightGoldenrodYellow: 0xFAFAD2, + ColorLightGray: 0xD3D3D3, + ColorLightGreen: 0x90EE90, + ColorLightPink: 0xFFB6C1, + ColorLightSalmon: 0xFFA07A, + ColorLightSeaGreen: 0x20B2AA, + ColorLightSkyBlue: 0x87CEFA, + ColorLightSlateGray: 0x778899, + ColorLightSteelBlue: 0xB0C4DE, + ColorLightYellow: 0xFFFFE0, + ColorLimeGreen: 0x32CD32, + ColorLinen: 0xFAF0E6, + ColorMediumAquamarine: 0x66CDAA, + ColorMediumBlue: 0x0000CD, + ColorMediumOrchid: 0xBA55D3, + ColorMediumPurple: 0x9370DB, + ColorMediumSeaGreen: 0x3CB371, + ColorMediumSlateBlue: 0x7B68EE, + ColorMediumSpringGreen: 0x00FA9A, + ColorMediumTurquoise: 
0x48D1CC, + ColorMediumVioletRed: 0xC71585, + ColorMidnightBlue: 0x191970, + ColorMintCream: 0xF5FFFA, + ColorMistyRose: 0xFFE4E1, + ColorMoccasin: 0xFFE4B5, + ColorNavajoWhite: 0xFFDEAD, + ColorOldLace: 0xFDF5E6, + ColorOliveDrab: 0x6B8E23, + ColorOrange: 0xFFA500, + ColorOrangeRed: 0xFF4500, + ColorOrchid: 0xDA70D6, + ColorPaleGoldenrod: 0xEEE8AA, + ColorPaleGreen: 0x98FB98, + ColorPaleTurquoise: 0xAFEEEE, + ColorPaleVioletRed: 0xDB7093, + ColorPapayaWhip: 0xFFEFD5, + ColorPeachPuff: 0xFFDAB9, + ColorPeru: 0xCD853F, + ColorPink: 0xFFC0CB, + ColorPlum: 0xDDA0DD, + ColorPowderBlue: 0xB0E0E6, + ColorRebeccaPurple: 0x663399, + ColorRosyBrown: 0xBC8F8F, + ColorRoyalBlue: 0x4169E1, + ColorSaddleBrown: 0x8B4513, + ColorSalmon: 0xFA8072, + ColorSandyBrown: 0xF4A460, + ColorSeaGreen: 0x2E8B57, + ColorSeashell: 0xFFF5EE, + ColorSienna: 0xA0522D, + ColorSkyblue: 0x87CEEB, + ColorSlateBlue: 0x6A5ACD, + ColorSlateGray: 0x708090, + ColorSnow: 0xFFFAFA, + ColorSpringGreen: 0x00FF7F, + ColorSteelBlue: 0x4682B4, + ColorTan: 0xD2B48C, + ColorThistle: 0xD8BFD8, + ColorTomato: 0xFF6347, + ColorTurquoise: 0x40E0D0, + ColorViolet: 0xEE82EE, + ColorWheat: 0xF5DEB3, + ColorWhiteSmoke: 0xF5F5F5, + ColorYellowGreen: 0x9ACD32, +} + +// ColorNames holds the written names of colors. Useful to present a list of +// recognized named colors. 
+var ColorNames = map[string]Color{ + "black": ColorBlack, + "maroon": ColorMaroon, + "green": ColorGreen, + "olive": ColorOlive, + "navy": ColorNavy, + "purple": ColorPurple, + "teal": ColorTeal, + "silver": ColorSilver, + "gray": ColorGray, + "red": ColorRed, + "lime": ColorLime, + "yellow": ColorYellow, + "blue": ColorBlue, + "fuchsia": ColorFuchsia, + "aqua": ColorAqua, + "white": ColorWhite, + "aliceblue": ColorAliceBlue, + "antiquewhite": ColorAntiqueWhite, + "aquamarine": ColorAquaMarine, + "azure": ColorAzure, + "beige": ColorBeige, + "bisque": ColorBisque, + "blanchedalmond": ColorBlanchedAlmond, + "blueviolet": ColorBlueViolet, + "brown": ColorBrown, + "burlywood": ColorBurlyWood, + "cadetblue": ColorCadetBlue, + "chartreuse": ColorChartreuse, + "chocolate": ColorChocolate, + "coral": ColorCoral, + "cornflowerblue": ColorCornflowerBlue, + "cornsilk": ColorCornsilk, + "crimson": ColorCrimson, + "darkblue": ColorDarkBlue, + "darkcyan": ColorDarkCyan, + "darkgoldenrod": ColorDarkGoldenrod, + "darkgray": ColorDarkGray, + "darkgreen": ColorDarkGreen, + "darkkhaki": ColorDarkKhaki, + "darkmagenta": ColorDarkMagenta, + "darkolivegreen": ColorDarkOliveGreen, + "darkorange": ColorDarkOrange, + "darkorchid": ColorDarkOrchid, + "darkred": ColorDarkRed, + "darksalmon": ColorDarkSalmon, + "darkseagreen": ColorDarkSeaGreen, + "darkslateblue": ColorDarkSlateBlue, + "darkslategray": ColorDarkSlateGray, + "darkturquoise": ColorDarkTurquoise, + "darkviolet": ColorDarkViolet, + "deeppink": ColorDeepPink, + "deepskyblue": ColorDeepSkyBlue, + "dimgray": ColorDimGray, + "dodgerblue": ColorDodgerBlue, + "firebrick": ColorFireBrick, + "floralwhite": ColorFloralWhite, + "forestgreen": ColorForestGreen, + "gainsboro": ColorGainsboro, + "ghostwhite": ColorGhostWhite, + "gold": ColorGold, + "goldenrod": ColorGoldenrod, + "greenyellow": ColorGreenYellow, + "honeydew": ColorHoneydew, + "hotpink": ColorHotPink, + "indianred": ColorIndianRed, + "indigo": ColorIndigo, + "ivory": 
ColorIvory, + "khaki": ColorKhaki, + "lavender": ColorLavender, + "lavenderblush": ColorLavenderBlush, + "lawngreen": ColorLawnGreen, + "lemonchiffon": ColorLemonChiffon, + "lightblue": ColorLightBlue, + "lightcoral": ColorLightCoral, + "lightcyan": ColorLightCyan, + "lightgoldenrodyellow": ColorLightGoldenrodYellow, + "lightgray": ColorLightGray, + "lightgreen": ColorLightGreen, + "lightpink": ColorLightPink, + "lightsalmon": ColorLightSalmon, + "lightseagreen": ColorLightSeaGreen, + "lightskyblue": ColorLightSkyBlue, + "lightslategray": ColorLightSlateGray, + "lightsteelblue": ColorLightSteelBlue, + "lightyellow": ColorLightYellow, + "limegreen": ColorLimeGreen, + "linen": ColorLinen, + "mediumaquamarine": ColorMediumAquamarine, + "mediumblue": ColorMediumBlue, + "mediumorchid": ColorMediumOrchid, + "mediumpurple": ColorMediumPurple, + "mediumseagreen": ColorMediumSeaGreen, + "mediumslateblue": ColorMediumSlateBlue, + "mediumspringgreen": ColorMediumSpringGreen, + "mediumturquoise": ColorMediumTurquoise, + "mediumvioletred": ColorMediumVioletRed, + "midnightblue": ColorMidnightBlue, + "mintcream": ColorMintCream, + "mistyrose": ColorMistyRose, + "moccasin": ColorMoccasin, + "navajowhite": ColorNavajoWhite, + "oldlace": ColorOldLace, + "olivedrab": ColorOliveDrab, + "orange": ColorOrange, + "orangered": ColorOrangeRed, + "orchid": ColorOrchid, + "palegoldenrod": ColorPaleGoldenrod, + "palegreen": ColorPaleGreen, + "paleturquoise": ColorPaleTurquoise, + "palevioletred": ColorPaleVioletRed, + "papayawhip": ColorPapayaWhip, + "peachpuff": ColorPeachPuff, + "peru": ColorPeru, + "pink": ColorPink, + "plum": ColorPlum, + "powderblue": ColorPowderBlue, + "rebeccapurple": ColorRebeccaPurple, + "rosybrown": ColorRosyBrown, + "royalblue": ColorRoyalBlue, + "saddlebrown": ColorSaddleBrown, + "salmon": ColorSalmon, + "sandybrown": ColorSandyBrown, + "seagreen": ColorSeaGreen, + "seashell": ColorSeashell, + "sienna": ColorSienna, + "skyblue": ColorSkyblue, + "slateblue": 
ColorSlateBlue, + "slategray": ColorSlateGray, + "snow": ColorSnow, + "springgreen": ColorSpringGreen, + "steelblue": ColorSteelBlue, + "tan": ColorTan, + "thistle": ColorThistle, + "tomato": ColorTomato, + "turquoise": ColorTurquoise, + "violet": ColorViolet, + "wheat": ColorWheat, + "whitesmoke": ColorWhiteSmoke, + "yellowgreen": ColorYellowGreen, + "grey": ColorGray, + "dimgrey": ColorDimGray, + "darkgrey": ColorDarkGray, + "darkslategrey": ColorDarkSlateGray, + "lightgrey": ColorLightGray, + "lightslategrey": ColorLightSlateGray, + "slategrey": ColorSlateGray, +} + +// Hex returns the color's hexadecimal RGB 24-bit value with each component +// consisting of a single byte, ala R << 16 | G << 8 | B. If the color +// is unknown or unset, -1 is returned. +func (c Color) Hex() int32 { + if c&ColorIsRGB != 0 { + return (int32(c) & 0xffffff) + } + if v, ok := ColorValues[c]; ok { + return v + } + return -1 +} + +// RGB returns the red, green, and blue components of the color, with +// each component represented as a value 0-255. In the event that the +// color cannot be broken up (not set usually), -1 is returned for each value. +func (c Color) RGB() (int32, int32, int32) { + v := c.Hex() + if v < 0 { + return -1, -1, -1 + } + return (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff +} + +// NewRGBColor returns a new color with the given red, green, and blue values. +// Each value must be represented in the range 0-255. +func NewRGBColor(r, g, b int32) Color { + return NewHexColor(((r & 0xff) << 16) | ((g & 0xff) << 8) | (b & 0xff)) +} + +// NewHexColor returns a color using the given 24-bit RGB value. +func NewHexColor(v int32) Color { + return ColorIsRGB | Color(v) +} + +// GetColor creates a Color from a color name (W3C name). A hex value may +// be supplied as a string in the format "#ffffff". 
+func GetColor(name string) Color { + if c, ok := ColorNames[name]; ok { + return c + } + if len(name) == 7 && name[0] == '#' { + if v, e := strconv.ParseInt(name[1:], 16, 32); e == nil { + return NewHexColor(int32(v)) + } + } + return ColorDefault +} diff --git a/vendor/github.com/gdamore/tcell/colorfit.go b/vendor/github.com/gdamore/tcell/colorfit.go new file mode 100644 index 00000000000..b7740b8ae24 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/colorfit.go @@ -0,0 +1,52 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "github.com/lucasb-eyer/go-colorful" + "math" +) + +// FindColor attempts to find a given color, or the best match possible for it, +// from the palette given. This is an expensive operation, so results should +// be cached by the caller. +func FindColor(c Color, palette []Color) Color { + match := ColorDefault + dist := float64(0) + r, g, b := c.RGB() + c1 := colorful.Color{ + R: float64(r) / 255.0, + G: float64(g) / 255.0, + B: float64(b) / 255.0, + } + for _, d := range palette { + r, g, b = d.RGB() + c2 := colorful.Color{ + R: float64(r) / 255.0, + G: float64(g) / 255.0, + B: float64(b) / 255.0, + } + // CIE94 is more accurate, but really really expensive. 
+ nd := c1.DistanceCIE76(c2) + if math.IsNaN(nd) { + nd = math.Inf(1) + } + if match == ColorDefault || nd < dist { + match = d + dist = nd + } + } + return match +} diff --git a/vendor/github.com/gdamore/tcell/console_stub.go b/vendor/github.com/gdamore/tcell/console_stub.go new file mode 100644 index 00000000000..fda2f0926ce --- /dev/null +++ b/vendor/github.com/gdamore/tcell/console_stub.go @@ -0,0 +1,23 @@ +// +build !windows + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// NewConsoleScreen returns a console based screen. This platform +// doesn't have support for any, so it returns nil and a suitable error. +func NewConsoleScreen() (Screen, error) { + return nil, ErrNoScreen +} diff --git a/vendor/github.com/gdamore/tcell/console_win.go b/vendor/github.com/gdamore/tcell/console_win.go new file mode 100644 index 00000000000..bd05fdf00e1 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/console_win.go @@ -0,0 +1,1032 @@ +// +build windows + +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "errors" + "sync" + "syscall" + "unicode/utf16" + "unsafe" +) + +type cScreen struct { + in syscall.Handle + out syscall.Handle + cancelflag syscall.Handle + scandone chan struct{} + evch chan Event + quit chan struct{} + curx int + cury int + style Style + clear bool + fini bool + + w int + h int + + oscreen consoleInfo + ocursor cursorInfo + oimode uint32 + oomode uint32 + cells CellBuffer + colors map[Color]Color + + sync.Mutex +} + +var winLock sync.Mutex + +var winPalette = []Color{ + ColorBlack, + ColorMaroon, + ColorGreen, + ColorNavy, + ColorOlive, + ColorPurple, + ColorTeal, + ColorSilver, + ColorGray, + ColorRed, + ColorLime, + ColorBlue, + ColorYellow, + ColorFuchsia, + ColorAqua, + ColorWhite, +} + +var winColors = map[Color]Color{ + ColorBlack: ColorBlack, + ColorMaroon: ColorMaroon, + ColorGreen: ColorGreen, + ColorNavy: ColorNavy, + ColorOlive: ColorOlive, + ColorPurple: ColorPurple, + ColorTeal: ColorTeal, + ColorSilver: ColorSilver, + ColorGray: ColorGray, + ColorRed: ColorRed, + ColorLime: ColorLime, + ColorBlue: ColorBlue, + ColorYellow: ColorYellow, + ColorFuchsia: ColorFuchsia, + ColorAqua: ColorAqua, + ColorWhite: ColorWhite, +} + +var k32 = syscall.NewLazyDLL("kernel32.dll") + +// We have to bring in the kernel32.dll directly, so we can get access to some +// system calls that the core Go API lacks. +// +// Note that Windows appends some functions with W to indicate that wide +// characters (Unicode) are in use. 
The documentation refers to them +// without this suffix, as the resolution is made via preprocessor. +var ( + procReadConsoleInput = k32.NewProc("ReadConsoleInputW") + procWaitForMultipleObjects = k32.NewProc("WaitForMultipleObjects") + procCreateEvent = k32.NewProc("CreateEventW") + procSetEvent = k32.NewProc("SetEvent") + procGetConsoleCursorInfo = k32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = k32.NewProc("SetConsoleCursorInfo") + procSetConsoleCursorPosition = k32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = k32.NewProc("SetConsoleMode") + procGetConsoleMode = k32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = k32.NewProc("GetConsoleScreenBufferInfo") + procFillConsoleOutputAttribute = k32.NewProc("FillConsoleOutputAttribute") + procFillConsoleOutputCharacter = k32.NewProc("FillConsoleOutputCharacterW") + procSetConsoleWindowInfo = k32.NewProc("SetConsoleWindowInfo") + procSetConsoleScreenBufferSize = k32.NewProc("SetConsoleScreenBufferSize") + procSetConsoleTextAttribute = k32.NewProc("SetConsoleTextAttribute") +) + +const ( + w32Infinite = ^uintptr(0) + w32WaitObject0 = uintptr(0) +) + +// NewConsoleScreen returns a Screen for the Windows console associated +// with the current process. The Screen makes use of the Windows Console +// API to display content and read events. 
+func NewConsoleScreen() (Screen, error) { + return &cScreen{}, nil +} + +func (s *cScreen) Init() error { + s.evch = make(chan Event, 10) + s.quit = make(chan struct{}) + s.scandone = make(chan struct{}) + + in, e := syscall.Open("CONIN$", syscall.O_RDWR, 0) + if e != nil { + return e + } + s.in = in + out, e := syscall.Open("CONOUT$", syscall.O_RDWR, 0) + if e != nil { + syscall.Close(s.in) + return e + } + s.out = out + + cf, _, e := procCreateEvent.Call( + uintptr(0), + uintptr(1), + uintptr(0), + uintptr(0)) + if cf == uintptr(0) { + return e + } + s.cancelflag = syscall.Handle(cf) + + s.Lock() + + s.curx = -1 + s.cury = -1 + s.style = StyleDefault + s.getCursorInfo(&s.ocursor) + s.getConsoleInfo(&s.oscreen) + s.getOutMode(&s.oomode) + s.getInMode(&s.oimode) + s.resize() + + s.fini = false + s.setInMode(modeResizeEn) + s.setOutMode(0) + s.clearScreen(s.style) + s.hideCursor() + s.Unlock() + go s.scanInput() + + return nil +} + +func (s *cScreen) CharacterSet() string { + // We are always UTF-16LE on Windows + return "UTF-16LE" +} + +func (s *cScreen) EnableMouse() { + s.setInMode(modeResizeEn | modeMouseEn | modeExtndFlg) +} + +func (s *cScreen) DisableMouse() { + s.setInMode(modeResizeEn) +} + +func (s *cScreen) Fini() { + s.Lock() + s.style = StyleDefault + s.curx = -1 + s.cury = -1 + s.fini = true + s.Unlock() + + s.setCursorInfo(&s.ocursor) + s.setInMode(s.oimode) + s.setOutMode(s.oomode) + s.setBufferSize(int(s.oscreen.size.x), int(s.oscreen.size.y)) + s.clearScreen(StyleDefault) + s.setCursorPos(0, 0) + procSetConsoleTextAttribute.Call( + uintptr(s.out), + uintptr(s.mapStyle(StyleDefault))) + + close(s.quit) + procSetEvent.Call(uintptr(s.cancelflag)) + // Block until scanInput returns; this prevents a race condition on Win 8+ + // which causes syscall.Close to block until another keypress is read. 
+ <-s.scandone + syscall.Close(s.in) + syscall.Close(s.out) +} + +func (s *cScreen) PostEventWait(ev Event) { + s.evch <- ev +} + +func (s *cScreen) PostEvent(ev Event) error { + select { + case s.evch <- ev: + return nil + default: + return ErrEventQFull + } +} + +func (s *cScreen) PollEvent() Event { + select { + case <-s.quit: + return nil + case ev := <-s.evch: + return ev + } +} + +type cursorInfo struct { + size uint32 + visible uint32 +} + +type coord struct { + x int16 + y int16 +} + +func (c coord) uintptr() uintptr { + // little endian, put x first + return uintptr(c.x) | (uintptr(c.y) << 16) +} + +type rect struct { + left int16 + top int16 + right int16 + bottom int16 +} + +func (s *cScreen) showCursor() { + s.setCursorInfo(&cursorInfo{size: 100, visible: 1}) +} + +func (s *cScreen) hideCursor() { + s.setCursorInfo(&cursorInfo{size: 1, visible: 0}) +} + +func (s *cScreen) ShowCursor(x, y int) { + s.Lock() + if !s.fini { + s.curx = x + s.cury = y + } + s.doCursor() + s.Unlock() +} + +func (s *cScreen) doCursor() { + x, y := s.curx, s.cury + + if x < 0 || y < 0 || x >= s.w || y >= s.h { + s.setCursorPos(0, 0) + s.hideCursor() + } else { + s.setCursorPos(x, y) + s.showCursor() + } +} + +func (s *cScreen) HideCursor() { + s.ShowCursor(-1, -1) +} + +type charInfo struct { + ch uint16 + attr uint16 +} + +type inputRecord struct { + typ uint16 + _ uint16 + data [16]byte +} + +const ( + keyEvent uint16 = 1 + mouseEvent uint16 = 2 + resizeEvent uint16 = 4 + menuEvent uint16 = 8 // don't use + focusEvent uint16 = 16 // don't use +) + +type mouseRecord struct { + x int16 + y int16 + btns uint32 + mod uint32 + flags uint32 +} + +const ( + mouseDoubleClick uint32 = 0x2 + mouseHWheeled uint32 = 0x8 + mouseVWheeled uint32 = 0x4 + mouseMoved uint32 = 0x1 +) + +type resizeRecord struct { + x int16 + y int16 +} + +type keyRecord struct { + isdown int32 + repeat uint16 + kcode uint16 + scode uint16 + ch uint16 + mod uint32 +} + +const ( + // Constants per Microsoft. 
We don't put the modifiers + // here. + vkCancel = 0x03 + vkBack = 0x08 // Backspace + vkTab = 0x09 + vkClear = 0x0c + vkReturn = 0x0d + vkPause = 0x13 + vkEscape = 0x1b + vkSpace = 0x20 + vkPrior = 0x21 // PgUp + vkNext = 0x22 // PgDn + vkEnd = 0x23 + vkHome = 0x24 + vkLeft = 0x25 + vkUp = 0x26 + vkRight = 0x27 + vkDown = 0x28 + vkPrint = 0x2a + vkPrtScr = 0x2c + vkInsert = 0x2d + vkDelete = 0x2e + vkHelp = 0x2f + vkF1 = 0x70 + vkF2 = 0x71 + vkF3 = 0x72 + vkF4 = 0x73 + vkF5 = 0x74 + vkF6 = 0x75 + vkF7 = 0x76 + vkF8 = 0x77 + vkF9 = 0x78 + vkF10 = 0x79 + vkF11 = 0x7a + vkF12 = 0x7b + vkF13 = 0x7c + vkF14 = 0x7d + vkF15 = 0x7e + vkF16 = 0x7f + vkF17 = 0x80 + vkF18 = 0x81 + vkF19 = 0x82 + vkF20 = 0x83 + vkF21 = 0x84 + vkF22 = 0x85 + vkF23 = 0x86 + vkF24 = 0x87 +) + +var vkKeys = map[uint16]Key{ + vkCancel: KeyCancel, + vkBack: KeyBackspace, + vkTab: KeyTab, + vkClear: KeyClear, + vkPause: KeyPause, + vkPrint: KeyPrint, + vkPrtScr: KeyPrint, + vkPrior: KeyPgUp, + vkNext: KeyPgDn, + vkReturn: KeyEnter, + vkEnd: KeyEnd, + vkHome: KeyHome, + vkLeft: KeyLeft, + vkUp: KeyUp, + vkRight: KeyRight, + vkDown: KeyDown, + vkInsert: KeyInsert, + vkDelete: KeyDelete, + vkHelp: KeyHelp, + vkF1: KeyF1, + vkF2: KeyF2, + vkF3: KeyF3, + vkF4: KeyF4, + vkF5: KeyF5, + vkF6: KeyF6, + vkF7: KeyF7, + vkF8: KeyF8, + vkF9: KeyF9, + vkF10: KeyF10, + vkF11: KeyF11, + vkF12: KeyF12, + vkF13: KeyF13, + vkF14: KeyF14, + vkF15: KeyF15, + vkF16: KeyF16, + vkF17: KeyF17, + vkF18: KeyF18, + vkF19: KeyF19, + vkF20: KeyF20, + vkF21: KeyF21, + vkF22: KeyF22, + vkF23: KeyF23, + vkF24: KeyF24, +} + +// NB: All Windows platforms are little endian. We assume this +// never, ever change. The following code is endian safe. and does +// not use unsafe pointers. 
+func getu32(v []byte) uint32 { + return uint32(v[0]) + (uint32(v[1]) << 8) + (uint32(v[2]) << 16) + (uint32(v[3]) << 24) +} +func geti32(v []byte) int32 { + return int32(getu32(v)) +} +func getu16(v []byte) uint16 { + return uint16(v[0]) + (uint16(v[1]) << 8) +} +func geti16(v []byte) int16 { + return int16(getu16(v)) +} + +// Convert windows dwControlKeyState to modifier mask +func mod2mask(cks uint32) ModMask { + mm := ModNone + // Left or right control + if (cks & (0x0008 | 0x0004)) != 0 { + mm |= ModCtrl + } + // Left or right alt + if (cks & (0x0002 | 0x0001)) != 0 { + mm |= ModAlt + } + // Any shift + if (cks & 0x0010) != 0 { + mm |= ModShift + } + return mm +} + +func mrec2btns(mbtns, flags uint32) ButtonMask { + btns := ButtonNone + if mbtns&0x1 != 0 { + btns |= Button1 + } + if mbtns&0x2 != 0 { + btns |= Button2 + } + if mbtns&0x4 != 0 { + btns |= Button3 + } + if mbtns&0x8 != 0 { + btns |= Button4 + } + if mbtns&0x10 != 0 { + btns |= Button5 + } + if mbtns&0x20 != 0 { + btns |= Button6 + } + if mbtns&0x40 != 0 { + btns |= Button7 + } + if mbtns&0x80 != 0 { + btns |= Button8 + } + + if flags&mouseVWheeled != 0 { + if mbtns&0x80000000 == 0 { + btns |= WheelUp + } else { + btns |= WheelDown + } + } + if flags&mouseHWheeled != 0 { + if mbtns&0x80000000 == 0 { + btns |= WheelRight + } else { + btns |= WheelLeft + } + } + return btns +} + +func (s *cScreen) getConsoleInput() error { + // cancelFlag comes first as WaitForMultipleObjects returns the lowest index + // in the event that both events are signalled. + waitObjects := []syscall.Handle{s.cancelflag, s.in} + // As arrays are contiguous in memory, a pointer to the first object is the + // same as a pointer to the array itself. + pWaitObjects := unsafe.Pointer(&waitObjects[0]) + + rv, _, er := procWaitForMultipleObjects.Call( + uintptr(len(waitObjects)), + uintptr(pWaitObjects), + uintptr(0), + w32Infinite) + // WaitForMultipleObjects returns WAIT_OBJECT_0 + the index. 
+ switch rv { + case w32WaitObject0: // s.cancelFlag + return errors.New("cancelled") + case w32WaitObject0 + 1: // s.in + rec := &inputRecord{} + var nrec int32 + rv, _, er := procReadConsoleInput.Call( + uintptr(s.in), + uintptr(unsafe.Pointer(rec)), + uintptr(1), + uintptr(unsafe.Pointer(&nrec))) + if rv == 0 { + return er + } + if nrec != 1 { + return nil + } + switch rec.typ { + case keyEvent: + krec := &keyRecord{} + krec.isdown = geti32(rec.data[0:]) + krec.repeat = getu16(rec.data[4:]) + krec.kcode = getu16(rec.data[6:]) + krec.scode = getu16(rec.data[8:]) + krec.ch = getu16(rec.data[10:]) + krec.mod = getu32(rec.data[12:]) + + if krec.isdown == 0 || krec.repeat < 1 { + // its a key release event, ignore it + return nil + } + if krec.ch != 0 { + // synthesized key code + for krec.repeat > 0 { + // convert shift+tab to backtab + if mod2mask(krec.mod) == ModShift && krec.ch == vkTab { + s.PostEvent(NewEventKey(KeyBacktab, 0, + ModNone)) + } else { + s.PostEvent(NewEventKey(KeyRune, rune(krec.ch), + mod2mask(krec.mod))) + } + krec.repeat-- + } + return nil + } + key := KeyNUL // impossible on Windows + ok := false + if key, ok = vkKeys[krec.kcode]; !ok { + return nil + } + for krec.repeat > 0 { + s.PostEvent(NewEventKey(key, rune(krec.ch), + mod2mask(krec.mod))) + krec.repeat-- + } + + case mouseEvent: + var mrec mouseRecord + mrec.x = geti16(rec.data[0:]) + mrec.y = geti16(rec.data[2:]) + mrec.btns = getu32(rec.data[4:]) + mrec.mod = getu32(rec.data[8:]) + mrec.flags = getu32(rec.data[12:]) + btns := mrec2btns(mrec.btns, mrec.flags) + // we ignore double click, events are delivered normally + s.PostEvent(NewEventMouse(int(mrec.x), int(mrec.y), btns, + mod2mask(mrec.mod))) + + case resizeEvent: + var rrec resizeRecord + rrec.x = geti16(rec.data[0:]) + rrec.y = geti16(rec.data[2:]) + s.PostEvent(NewEventResize(int(rrec.x), int(rrec.y))) + + default: + } + default: + return er + } + + return nil +} + +func (s *cScreen) scanInput() { + for { + if e := 
s.getConsoleInput(); e != nil { + close(s.scandone) + return + } + } +} + +// Windows console can display 8 characters, in either low or high intensity +func (s *cScreen) Colors() int { + return 16 +} + +var vgaColors = map[Color]uint16{ + ColorBlack: 0, + ColorMaroon: 0x4, + ColorGreen: 0x2, + ColorNavy: 0x1, + ColorOlive: 0x6, + ColorPurple: 0x5, + ColorTeal: 0x3, + ColorSilver: 0x7, + ColorGrey: 0x8, + ColorRed: 0xc, + ColorLime: 0xa, + ColorBlue: 0x9, + ColorYellow: 0xe, + ColorFuchsia: 0xd, + ColorAqua: 0xb, + ColorWhite: 0xf, +} + +// Windows uses RGB signals +func mapColor2RGB(c Color) uint16 { + winLock.Lock() + if v, ok := winColors[c]; ok { + c = v + } else { + v = FindColor(c, winPalette) + winColors[c] = v + c = v + } + winLock.Unlock() + + if vc, ok := vgaColors[c]; ok { + return vc + } + return 0 +} + +// Map a tcell style to Windows attributes +func (s *cScreen) mapStyle(style Style) uint16 { + f, b, a := style.Decompose() + fa := s.oscreen.attrs & 0xf + ba := (s.oscreen.attrs) >> 4 & 0xf + if f != ColorDefault { + fa = mapColor2RGB(f) + } + if b != ColorDefault { + ba = mapColor2RGB(b) + } + var attr uint16 + // We simulate reverse by doing the color swap ourselves. + // Apparently windows cannot really do this except in DBCS + // views. + if a&AttrReverse != 0 { + attr = ba + attr |= (fa << 4) + } else { + attr = fa + attr |= (ba << 4) + } + if a&AttrBold != 0 { + attr |= 0x8 + } + if a&AttrDim != 0 { + attr &^= 0x8 + } + if a&AttrUnderline != 0 { + // Best effort -- doesn't seem to work though. 
+ attr |= 0x8000 + } + // Blink is unsupported + return attr +} + +func (s *cScreen) SetCell(x, y int, style Style, ch ...rune) { + if len(ch) > 0 { + s.SetContent(x, y, ch[0], ch[1:], style) + } else { + s.SetContent(x, y, ' ', nil, style) + } +} + +func (s *cScreen) SetContent(x, y int, mainc rune, combc []rune, style Style) { + s.Lock() + if !s.fini { + s.cells.SetContent(x, y, mainc, combc, style) + } + s.Unlock() +} + +func (s *cScreen) GetContent(x, y int) (rune, []rune, Style, int) { + s.Lock() + mainc, combc, style, width := s.cells.GetContent(x, y) + s.Unlock() + return mainc, combc, style, width +} + +func (s *cScreen) writeString(x, y int, style Style, ch []uint16) { + // we assume the caller has hidden the cursor + if len(ch) == 0 { + return + } + nw := uint32(len(ch)) + procSetConsoleTextAttribute.Call( + uintptr(s.out), + uintptr(s.mapStyle(style))) + s.setCursorPos(x, y) + syscall.WriteConsole(s.out, &ch[0], nw, &nw, nil) +} + +func (s *cScreen) draw() { + // allocate a scratch line bit enough for no combining chars. + // if you have combining characters, you may pay for extra allocs. 
+ if s.clear { + s.clearScreen(s.style) + s.clear = false + s.cells.Invalidate() + } + buf := make([]uint16, 0, s.w) + wcs := buf[:] + lstyle := Style(-1) // invalid attribute + + lx, ly := -1, -1 + ra := make([]rune, 1) + + for y := 0; y < int(s.h); y++ { + for x := 0; x < int(s.w); x++ { + mainc, combc, style, width := s.cells.GetContent(x, y) + dirty := s.cells.Dirty(x, y) + if style == StyleDefault { + style = s.style + } + + if !dirty || style != lstyle { + // write out any data queued thus far + // because we are going to skip over some + // cells, or because we need to change styles + s.writeString(lx, ly, lstyle, wcs) + wcs = buf[0:0] + lstyle = Style(-1) + if !dirty { + continue + } + } + if x > s.w-width { + mainc = ' ' + combc = nil + width = 1 + } + if len(wcs) == 0 { + lstyle = style + lx = x + ly = y + } + ra[0] = mainc + wcs = append(wcs, utf16.Encode(ra)...) + if len(combc) != 0 { + wcs = append(wcs, utf16.Encode(combc)...) + } + s.cells.SetDirty(x, y, false) + x += width - 1 + } + s.writeString(lx, ly, lstyle, wcs) + wcs = buf[0:0] + lstyle = Style(-1) + } +} + +func (s *cScreen) Show() { + s.Lock() + if !s.fini { + s.hideCursor() + s.resize() + s.draw() + s.doCursor() + } + s.Unlock() +} + +func (s *cScreen) Sync() { + s.Lock() + if !s.fini { + s.cells.Invalidate() + s.hideCursor() + s.resize() + s.draw() + s.doCursor() + } + s.Unlock() +} + +type consoleInfo struct { + size coord + pos coord + attrs uint16 + win rect + maxsz coord +} + +func (s *cScreen) getConsoleInfo(info *consoleInfo) { + procGetConsoleScreenBufferInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) +} + +func (s *cScreen) getCursorInfo(info *cursorInfo) { + procGetConsoleCursorInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) +} + +func (s *cScreen) setCursorInfo(info *cursorInfo) { + procSetConsoleCursorInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) +} + +func (s *cScreen) setCursorPos(x, y int) { + procSetConsoleCursorPosition.Call( + 
uintptr(s.out), + coord{int16(x), int16(y)}.uintptr()) +} + +func (s *cScreen) setBufferSize(x, y int) { + procSetConsoleScreenBufferSize.Call( + uintptr(s.out), + coord{int16(x), int16(y)}.uintptr()) +} + +func (s *cScreen) Size() (int, int) { + s.Lock() + w, h := s.w, s.h + s.Unlock() + + return w, h +} + +func (s *cScreen) resize() { + info := consoleInfo{} + s.getConsoleInfo(&info) + + w := int((info.win.right - info.win.left) + 1) + h := int((info.win.bottom - info.win.top) + 1) + + if s.w == w && s.h == h { + return + } + + s.cells.Resize(w, h) + s.w = w + s.h = h + + r := rect{0, 0, int16(w - 1), int16(h - 1)} + procSetConsoleWindowInfo.Call( + uintptr(s.out), + uintptr(1), + uintptr(unsafe.Pointer(&r))) + + s.setBufferSize(w, h) + + s.PostEvent(NewEventResize(w, h)) +} + +func (s *cScreen) Clear() { + s.Fill(' ', s.style) +} + +func (s *cScreen) Fill(r rune, style Style) { + s.Lock() + if !s.fini { + s.cells.Fill(r, style) + s.clear = true + } + s.Unlock() +} + +func (s *cScreen) clearScreen(style Style) { + pos := coord{0, 0} + attr := s.mapStyle(style) + x, y := s.w, s.h + scratch := uint32(0) + count := uint32(x * y) + + procFillConsoleOutputAttribute.Call( + uintptr(s.out), + uintptr(attr), + uintptr(count), + pos.uintptr(), + uintptr(unsafe.Pointer(&scratch))) + procFillConsoleOutputCharacter.Call( + uintptr(s.out), + uintptr(' '), + uintptr(count), + pos.uintptr(), + uintptr(unsafe.Pointer(&scratch))) +} + +const ( + modeExtndFlg uint32 = 0x0080 + modeMouseEn uint32 = 0x0010 + modeResizeEn uint32 = 0x0008 + modeWrapEOL uint32 = 0x0002 + modeCooked uint32 = 0x0001 +) + +func (s *cScreen) setInMode(mode uint32) error { + rv, _, err := procSetConsoleMode.Call( + uintptr(s.in), + uintptr(mode)) + if rv == 0 { + return err + } + return nil +} + +func (s *cScreen) setOutMode(mode uint32) error { + rv, _, err := procSetConsoleMode.Call( + uintptr(s.out), + uintptr(mode)) + if rv == 0 { + return err + } + return nil +} + +func (s *cScreen) getInMode(v 
*uint32) { + procGetConsoleMode.Call( + uintptr(s.in), + uintptr(unsafe.Pointer(v))) +} + +func (s *cScreen) getOutMode(v *uint32) { + procGetConsoleMode.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(v))) +} + +func (s *cScreen) SetStyle(style Style) { + s.Lock() + s.style = style + s.Unlock() +} + +// No fallback rune support, since we have Unicode. Yay! + +func (s *cScreen) RegisterRuneFallback(r rune, subst string) { +} + +func (s *cScreen) UnregisterRuneFallback(r rune) { +} + +func (s *cScreen) CanDisplay(r rune, checkFallbacks bool) bool { + // We presume we can display anything -- we're Unicode. + // (Sadly this not precisely true. Combinings are especially + // poorly supported under Windows.) + return true +} + +func (s *cScreen) HasMouse() bool { + return true +} + +func (s *cScreen) Resize(int, int, int, int) {} + +func (s *cScreen) HasKey(k Key) bool { + // Microsoft has codes for some keys, but they are unusual, + // so we don't include them. We include all the typical + // 101, 105 key layout keys. + valid := map[Key]bool{ + KeyBackspace: true, + KeyTab: true, + KeyEscape: true, + KeyPause: true, + KeyPrint: true, + KeyPgUp: true, + KeyPgDn: true, + KeyEnter: true, + KeyEnd: true, + KeyHome: true, + KeyLeft: true, + KeyUp: true, + KeyRight: true, + KeyDown: true, + KeyInsert: true, + KeyDelete: true, + KeyF1: true, + KeyF2: true, + KeyF3: true, + KeyF4: true, + KeyF5: true, + KeyF6: true, + KeyF7: true, + KeyF8: true, + KeyF9: true, + KeyF10: true, + KeyF11: true, + KeyF12: true, + KeyRune: true, + } + + return valid[k] +} diff --git a/vendor/github.com/gdamore/tcell/doc.go b/vendor/github.com/gdamore/tcell/doc.go new file mode 100644 index 00000000000..b6719613594 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tcell provides a lower-level, portable API for building +// programs that interact with terminals or consoles. It works with +// both common (and many uncommon!) terminals or terminal emulators, +// and Windows console implementations. +// +// It provides support for up to 256 colors, text attributes, and box drawing +// elements. A database of terminals built from a real terminfo database +// is provided, along with code to generate new database entries. +// +// Tcell offers very rich support for mice, dependent upon the terminal +// of course. (Windows, XTerm, and iTerm 2 are known to work very well.) +// +// If the environment is not Unicode by default, such as an ISO8859 based +// locale or GB18030, Tcell can convert input and output, so that your +// terminal can operate in whatever locale is most convenient, while the +// application program can just assume "everything is UTF-8". Reasonable +// defaults are used for updating characters to something suitable for +// display. Unicode box drawing characters will be converted to use the +// alternate character set of your terminal, if native conversions are +// not available. If no ACS is available, then some ASCII fallbacks will +// be used. +// +// Note that support for non-UTF-8 locales (other than C) must be enabled +// by the application using RegisterEncoding() -- we don't have them all +// enabled by default to avoid bloating the application unneccessarily. +// (These days UTF-8 is good enough for almost everyone, and nobody should +// be using legacy locales anymore.) 
Also, actual glyphs for various code +// point will only be displayed if your terminal or emulator (or the font +// the emulator is using) supports them. +// +// A rich set of keycodes is supported, with support for up to 65 function +// keys, and various other special keys. +// +package tcell diff --git a/vendor/github.com/gdamore/tcell/encoding.go b/vendor/github.com/gdamore/tcell/encoding.go new file mode 100644 index 00000000000..596a6e8005e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/encoding.go @@ -0,0 +1,139 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "strings" + "sync" + + "golang.org/x/text/encoding" + + gencoding "github.com/gdamore/encoding" +) + +var encodings map[string]encoding.Encoding +var encodingLk sync.Mutex +var encodingFallback EncodingFallback = EncodingFallbackFail + +// RegisterEncoding may be called by the application to register an encoding. +// The presence of additional encodings will facilitate application usage with +// terminal environments where the I/O subsystem does not support Unicode. +// +// Windows systems use Unicode natively, and do not need any of the encoding +// subsystem when using Windows Console screens. +// +// Please see the Go documentation for golang.org/x/text/encoding -- most of +// the common ones exist already as stock variables. 
For example, ISO8859-15 +// can be registered using the following code: +// +// import "golang.org/x/text/encoding/charmap" +// +// ... +// RegisterEncoding("ISO8859-15", charmap.ISO8859_15) +// +// Aliases can be registered as well, for example "8859-15" could be an alias +// for "ISO8859-15". +// +// For POSIX systems, the tcell package will check the environment variables +// LC_ALL, LC_CTYPE, and LANG (in that order) to determine the character set. +// These are expected to have the following pattern: +// +// $language[.$codeset[@$variant] +// +// We extract only the $codeset part, which will usually be something like +// UTF-8 or ISO8859-15 or KOI8-R. Note that if the locale is either "POSIX" +// or "C", then we assume US-ASCII (the POSIX 'portable character set' +// and assume all other characters are somehow invalid.) +// +// Modern POSIX systems and terminal emulators may use UTF-8, and for those +// systems, this API is also unnecessary. For example, Darwin (MacOS X) and +// modern Linux running modern xterm generally will out of the box without +// any of this. Use of UTF-8 is recommended when possible, as it saves +// quite a lot processing overhead. +// +// Note that some encodings are quite large (for example GB18030 which is a +// superset of Unicode) and so the application size can be expected ot +// increase quite a bit as each encoding is added. The East Asian encodings +// have been seen to add 100-200K per encoding to the application size. +// +func RegisterEncoding(charset string, enc encoding.Encoding) { + encodingLk.Lock() + charset = strings.ToLower(charset) + encodings[charset] = enc + encodingLk.Unlock() +} + +// EncodingFallback describes how the system behavees when the locale +// requires a character set that we do not support. The system always +// supports UTF-8 and US-ASCII. On Windows consoles, UTF-16LE is also +// supported automatically. Other character sets must be added using the +// RegisterEncoding API. 
(A large group of nearly all of them can be +// added using the RegisterAll function in the encoding sub package.) +type EncodingFallback int + +const ( + // EncodingFallbackFail behavior causes GetEncoding to fail + // when it cannot find an encoding. + EncodingFallbackFail = iota + + // EncodingFallbackASCII behaviore causes GetEncoding to fall back + // to a 7-bit ASCII encoding, if no other encoding can be found. + EncodingFallbackASCII + + // EncodingFallbackUTF8 behavior causes GetEncoding to assume + // UTF8 can pass unmodified upon failure. Note that this behavior + // is not recommended, unless you are sure your terminal can cope + // with real UTF8 sequences. + EncodingFallbackUTF8 +) + +// SetEncodingFallback changes the behavior of GetEncoding when a suitable +// encoding is not found. The default is EncodingFallbackFail, which +// causes GetEncoding to simply return nil. +func SetEncodingFallback(fb EncodingFallback) { + encodingLk.Lock() + encodingFallback = fb + encodingLk.Unlock() +} + +// GetEncoding is used by Screen implementors who want to locate an encoding +// for the given character set name. Note that this will return nil for +// either the Unicode (UTF-8) or ASCII encodings, since we don't use +// encodings for them but instead have our own native methods. +func GetEncoding(charset string) encoding.Encoding { + charset = strings.ToLower(charset) + encodingLk.Lock() + defer encodingLk.Unlock() + if enc, ok := encodings[charset]; ok { + return enc + } + switch encodingFallback { + case EncodingFallbackASCII: + return gencoding.ASCII + case EncodingFallbackUTF8: + return encoding.Nop + } + return nil +} + +func init() { + // We always support UTF-8 and ASCII. 
+ encodings = make(map[string]encoding.Encoding) + encodings["utf-8"] = gencoding.UTF8 + encodings["utf8"] = gencoding.UTF8 + encodings["us-ascii"] = gencoding.ASCII + encodings["ascii"] = gencoding.ASCII + encodings["iso646"] = gencoding.ASCII +} diff --git a/vendor/github.com/gdamore/tcell/errors.go b/vendor/github.com/gdamore/tcell/errors.go new file mode 100644 index 00000000000..920b64b255a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/errors.go @@ -0,0 +1,73 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "errors" + "time" + + "github.com/gdamore/tcell/terminfo" +) + +var ( + // ErrTermNotFound indicates that a suitable terminal entry could + // not be found. This can result from either not having TERM set, + // or from the TERM failing to support certain minimal functionality, + // in particular absolute cursor addressability (the cup capability) + // is required. For example, legacy "adm3" lacks this capability, + // whereas the slightly newer "adm3a" supports it. This failure + // occurs most often with "dumb". + ErrTermNotFound = terminfo.ErrTermNotFound + + // ErrNoScreen indicates that no suitable screen could be found. + // This may result from attempting to run on a platform where there + // is no support for either termios or console I/O (such as nacl), + // or from running in an environment where there is no access to + // a suitable console/terminal device. 
(For example, running on + // without a controlling TTY or with no /dev/tty on POSIX platforms.) + ErrNoScreen = errors.New("no suitable screen available") + + // ErrNoCharset indicates that the locale environment the + // program is not supported by the program, because no suitable + // encoding was found for it. This problem never occurs if + // the environment is UTF-8 or UTF-16. + ErrNoCharset = errors.New("character set not supported") + + // ErrEventQFull indicates that the event queue is full, and + // cannot accept more events. + ErrEventQFull = errors.New("event queue full") +) + +// An EventError is an event representing some sort of error, and carries +// an error payload. +type EventError struct { + t time.Time + err error +} + +// When returns the time when the event was created. +func (ev *EventError) When() time.Time { + return ev.t +} + +// Error implements the error. +func (ev *EventError) Error() string { + return ev.err.Error() +} + +// NewEventError creates an ErrorEvent with the given error payload. +func NewEventError(err error) *EventError { + return &EventError{t: time.Now(), err: err} +} diff --git a/vendor/github.com/gdamore/tcell/event.go b/vendor/github.com/gdamore/tcell/event.go new file mode 100644 index 00000000000..a3b770063be --- /dev/null +++ b/vendor/github.com/gdamore/tcell/event.go @@ -0,0 +1,53 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "time" +) + +// Event is a generic interface used for passing around Events. +// Concrete types follow. +type Event interface { + // When reports the time when the event was generated. + When() time.Time +} + +// EventTime is a simple base event class, suitable for easy reuse. +// It can be used to deliver actual timer events as well. +type EventTime struct { + when time.Time +} + +// When returns the time stamp when the event occurred. +func (e *EventTime) When() time.Time { + return e.when +} + +// SetEventTime sets the time of occurrence for the event. +func (e *EventTime) SetEventTime(t time.Time) { + e.when = t +} + +// SetEventNow sets the time of occurrence for the event to the current time. +func (e *EventTime) SetEventNow() { + e.SetEventTime(time.Now()) +} + +// EventHandler is anything that handles events. If the handler has +// consumed the event, it should return true. False otherwise. +type EventHandler interface { + HandleEvent(Event) bool +} diff --git a/vendor/github.com/gdamore/tcell/interrupt.go b/vendor/github.com/gdamore/tcell/interrupt.go new file mode 100644 index 00000000000..70dddfce2f5 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/interrupt.go @@ -0,0 +1,41 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventInterrupt is a generic wakeup event. Its can be used to +// to request a redraw. 
It can carry an arbitrary payload, as well. +type EventInterrupt struct { + t time.Time + v interface{} +} + +// When returns the time when this event was created. +func (ev *EventInterrupt) When() time.Time { + return ev.t +} + +// Data is used to obtain the opaque event payload. +func (ev *EventInterrupt) Data() interface{} { + return ev.v +} + +// NewEventInterrupt creates an EventInterrupt with the given payload. +func NewEventInterrupt(data interface{}) *EventInterrupt { + return &EventInterrupt{t: time.Now(), v: data} +} diff --git a/vendor/github.com/gdamore/tcell/key.go b/vendor/github.com/gdamore/tcell/key.go new file mode 100644 index 00000000000..3545215acc4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/key.go @@ -0,0 +1,464 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "fmt" + "strings" + "time" +) + +// EventKey represents a key press. Usually this is a key press followed +// by a key release, but since terminal programs don't have a way to report +// key release events, we usually get just one event. If a key is held down +// then the terminal may synthesize repeated key presses at some predefined +// rate. We have no control over that, nor visibility into it. +// +// In some cases, we can have a modifier key, such as ModAlt, that can be +// generated with a key press. 
(This usually is represented by having the +// high bit set, or in some cases, by sending an ESC prior to the rune.) +// +// If the value of Key() is KeyRune, then the actual key value will be +// available with the Rune() method. This will be the case for most keys. +// In most situations, the modifiers will not be set. For example, if the +// rune is 'A', this will be reported without the ModShift bit set, since +// really can't tell if the Shift key was pressed (it might have been CAPSLOCK, +// or a terminal that only can send capitals, or keyboard with separate +// capital letters from lower case letters). +// +// Generally, terminal applications have far less visibility into keyboard +// activity than graphical applications. Hence, they should avoid depending +// overly much on availability of modifiers, or the availability of any +// specific keys. +type EventKey struct { + t time.Time + mod ModMask + key Key + ch rune +} + +// When returns the time when this Event was created, which should closely +// match the time when the key was pressed. +func (ev *EventKey) When() time.Time { + return ev.t +} + +// Rune returns the rune corresponding to the key press, if it makes sense. +// The result is only defined if the value of Key() is KeyRune. +func (ev *EventKey) Rune() rune { + return ev.ch +} + +// Key returns a virtual key code. We use this to identify specific key +// codes, such as KeyEnter, etc. Most control and function keys are reported +// with unique Key values. Normal alphanumeric and punctuation keys will +// generally return KeyRune here; the specific key can be further decoded +// using the Rune() function. +func (ev *EventKey) Key() Key { + return ev.key +} + +// Modifiers returns the modifiers that were present with the key press. Note +// that not all platforms and terminals support this equally well, and some +// cases we will not not know for sure. Hence, applications should avoid +// using this in most circumstances. 
+func (ev *EventKey) Modifiers() ModMask { + return ev.mod +} + +// KeyNames holds the written names of special keys. Useful to echo back a key +// name, or to look up a key from a string value. +var KeyNames = map[Key]string{ + KeyEnter: "Enter", + KeyBackspace: "Backspace", + KeyTab: "Tab", + KeyBacktab: "Backtab", + KeyEsc: "Esc", + KeyBackspace2: "Backspace2", + KeyDelete: "Delete", + KeyInsert: "Insert", + KeyUp: "Up", + KeyDown: "Down", + KeyLeft: "Left", + KeyRight: "Right", + KeyHome: "Home", + KeyEnd: "End", + KeyUpLeft: "UpLeft", + KeyUpRight: "UpRight", + KeyDownLeft: "DownLeft", + KeyDownRight: "DownRight", + KeyCenter: "Center", + KeyPgDn: "PgDn", + KeyPgUp: "PgUp", + KeyClear: "Clear", + KeyExit: "Exit", + KeyCancel: "Cancel", + KeyPause: "Pause", + KeyPrint: "Print", + KeyF1: "F1", + KeyF2: "F2", + KeyF3: "F3", + KeyF4: "F4", + KeyF5: "F5", + KeyF6: "F6", + KeyF7: "F7", + KeyF8: "F8", + KeyF9: "F9", + KeyF10: "F10", + KeyF11: "F11", + KeyF12: "F12", + KeyF13: "F13", + KeyF14: "F14", + KeyF15: "F15", + KeyF16: "F16", + KeyF17: "F17", + KeyF18: "F18", + KeyF19: "F19", + KeyF20: "F20", + KeyF21: "F21", + KeyF22: "F22", + KeyF23: "F23", + KeyF24: "F24", + KeyF25: "F25", + KeyF26: "F26", + KeyF27: "F27", + KeyF28: "F28", + KeyF29: "F29", + KeyF30: "F30", + KeyF31: "F31", + KeyF32: "F32", + KeyF33: "F33", + KeyF34: "F34", + KeyF35: "F35", + KeyF36: "F36", + KeyF37: "F37", + KeyF38: "F38", + KeyF39: "F39", + KeyF40: "F40", + KeyF41: "F41", + KeyF42: "F42", + KeyF43: "F43", + KeyF44: "F44", + KeyF45: "F45", + KeyF46: "F46", + KeyF47: "F47", + KeyF48: "F48", + KeyF49: "F49", + KeyF50: "F50", + KeyF51: "F51", + KeyF52: "F52", + KeyF53: "F53", + KeyF54: "F54", + KeyF55: "F55", + KeyF56: "F56", + KeyF57: "F57", + KeyF58: "F58", + KeyF59: "F59", + KeyF60: "F60", + KeyF61: "F61", + KeyF62: "F62", + KeyF63: "F63", + KeyF64: "F64", + KeyCtrlA: "Ctrl-A", + KeyCtrlB: "Ctrl-B", + KeyCtrlC: "Ctrl-C", + KeyCtrlD: "Ctrl-D", + KeyCtrlE: "Ctrl-E", + KeyCtrlF: "Ctrl-F", + 
KeyCtrlG: "Ctrl-G", + KeyCtrlJ: "Ctrl-J", + KeyCtrlK: "Ctrl-K", + KeyCtrlL: "Ctrl-L", + KeyCtrlN: "Ctrl-N", + KeyCtrlO: "Ctrl-O", + KeyCtrlP: "Ctrl-P", + KeyCtrlQ: "Ctrl-Q", + KeyCtrlR: "Ctrl-R", + KeyCtrlS: "Ctrl-S", + KeyCtrlT: "Ctrl-T", + KeyCtrlU: "Ctrl-U", + KeyCtrlV: "Ctrl-V", + KeyCtrlW: "Ctrl-W", + KeyCtrlX: "Ctrl-X", + KeyCtrlY: "Ctrl-Y", + KeyCtrlZ: "Ctrl-Z", + KeyCtrlSpace: "Ctrl-Space", + KeyCtrlUnderscore: "Ctrl-_", + KeyCtrlRightSq: "Ctrl-]", + KeyCtrlBackslash: "Ctrl-\\", + KeyCtrlCarat: "Ctrl-^", +} + +// Name returns a printable value or the key stroke. This can be used +// when printing the event, for example. +func (ev *EventKey) Name() string { + s := "" + m := []string{} + if ev.mod&ModShift != 0 { + m = append(m, "Shift") + } + if ev.mod&ModAlt != 0 { + m = append(m, "Alt") + } + if ev.mod&ModMeta != 0 { + m = append(m, "Meta") + } + if ev.mod&ModCtrl != 0 { + m = append(m, "Ctrl") + } + + ok := false + if s, ok = KeyNames[ev.key]; !ok { + if ev.key == KeyRune { + s = "Rune[" + string(ev.ch) + "]" + } else { + s = fmt.Sprintf("Key[%d,%d]", ev.key, int(ev.ch)) + } + } + if len(m) != 0 { + if ev.mod&ModCtrl != 0 && strings.HasPrefix(s, "Ctrl-") { + s = s[5:] + } + return fmt.Sprintf("%s+%s", strings.Join(m, "+"), s) + } + return s +} + +// NewEventKey attempts to create a suitable event. It parses the various +// ASCII control sequences if KeyRune is passed for Key, but if the caller +// has more precise information it should set that specifically. Callers +// that aren't sure about modifier state (most) should just pass ModNone. +func NewEventKey(k Key, ch rune, mod ModMask) *EventKey { + if k == KeyRune && (ch < ' ' || ch == 0x7f) { + // Turn specials into proper key codes. This is for + // control characters and the DEL. 
+ k = Key(ch) + if mod == ModNone && ch < ' ' { + switch Key(ch) { + case KeyBackspace, KeyTab, KeyEsc, KeyEnter: + // these keys are directly typeable without CTRL + default: + // most likely entered with a CTRL keypress + mod = ModCtrl + } + } + } + return &EventKey{t: time.Now(), key: k, ch: ch, mod: mod} +} + +// ModMask is a mask of modifier keys. Note that it will not always be +// possible to report modifier keys. +type ModMask int16 + +// These are the modifiers keys that can be sent either with a key press, +// or a mouse event. Note that as of now, due to the confusion associated +// with Meta, and the lack of support for it on many/most platforms, the +// current implementations never use it. Instead, they use ModAlt, even for +// events that could possibly have been distinguished from ModAlt. +const ( + ModShift ModMask = 1 << iota + ModCtrl + ModAlt + ModMeta + ModNone ModMask = 0 +) + +// Key is a generic value for representing keys, and especially special +// keys (function keys, cursor movement keys, etc.) For normal keys, like +// ASCII letters, we use KeyRune, and then expect the application to +// inspect the Rune() member of the EventKey. +type Key int16 + +// This is the list of named keys. KeyRune is special however, in that it is +// a place holder key indicating that a printable character was sent. The +// actual value of the rune will be transported in the Rune of the associated +// EventKey. 
+const ( + KeyRune Key = iota + 256 + KeyUp + KeyDown + KeyRight + KeyLeft + KeyUpLeft + KeyUpRight + KeyDownLeft + KeyDownRight + KeyCenter + KeyPgUp + KeyPgDn + KeyHome + KeyEnd + KeyInsert + KeyDelete + KeyHelp + KeyExit + KeyClear + KeyCancel + KeyPrint + KeyPause + KeyBacktab + KeyF1 + KeyF2 + KeyF3 + KeyF4 + KeyF5 + KeyF6 + KeyF7 + KeyF8 + KeyF9 + KeyF10 + KeyF11 + KeyF12 + KeyF13 + KeyF14 + KeyF15 + KeyF16 + KeyF17 + KeyF18 + KeyF19 + KeyF20 + KeyF21 + KeyF22 + KeyF23 + KeyF24 + KeyF25 + KeyF26 + KeyF27 + KeyF28 + KeyF29 + KeyF30 + KeyF31 + KeyF32 + KeyF33 + KeyF34 + KeyF35 + KeyF36 + KeyF37 + KeyF38 + KeyF39 + KeyF40 + KeyF41 + KeyF42 + KeyF43 + KeyF44 + KeyF45 + KeyF46 + KeyF47 + KeyF48 + KeyF49 + KeyF50 + KeyF51 + KeyF52 + KeyF53 + KeyF54 + KeyF55 + KeyF56 + KeyF57 + KeyF58 + KeyF59 + KeyF60 + KeyF61 + KeyF62 + KeyF63 + KeyF64 +) + +// These are the control keys. Note that they overlap with other keys, +// perhaps. For example, KeyCtrlH is the same as KeyBackspace. +const ( + KeyCtrlSpace Key = iota + KeyCtrlA + KeyCtrlB + KeyCtrlC + KeyCtrlD + KeyCtrlE + KeyCtrlF + KeyCtrlG + KeyCtrlH + KeyCtrlI + KeyCtrlJ + KeyCtrlK + KeyCtrlL + KeyCtrlM + KeyCtrlN + KeyCtrlO + KeyCtrlP + KeyCtrlQ + KeyCtrlR + KeyCtrlS + KeyCtrlT + KeyCtrlU + KeyCtrlV + KeyCtrlW + KeyCtrlX + KeyCtrlY + KeyCtrlZ + KeyCtrlLeftSq // Escape + KeyCtrlBackslash + KeyCtrlRightSq + KeyCtrlCarat + KeyCtrlUnderscore +) + +// Special values - these are fixed in an attempt to make it more likely +// that aliases will encode the same way. + +// These are the defined ASCII values for key codes. They generally match +// with KeyCtrl values. +const ( + KeyNUL Key = iota + KeySOH + KeySTX + KeyETX + KeyEOT + KeyENQ + KeyACK + KeyBEL + KeyBS + KeyTAB + KeyLF + KeyVT + KeyFF + KeyCR + KeySO + KeySI + KeyDLE + KeyDC1 + KeyDC2 + KeyDC3 + KeyDC4 + KeyNAK + KeySYN + KeyETB + KeyCAN + KeyEM + KeySUB + KeyESC + KeyFS + KeyGS + KeyRS + KeyUS + KeyDEL Key = 0x7F +) + +// These keys are aliases for other names. 
+const ( + KeyBackspace = KeyBS + KeyTab = KeyTAB + KeyEsc = KeyESC + KeyEscape = KeyESC + KeyEnter = KeyCR + KeyBackspace2 = KeyDEL +) diff --git a/vendor/github.com/gdamore/tcell/mouse.go b/vendor/github.com/gdamore/tcell/mouse.go new file mode 100644 index 00000000000..8c51c98ea9e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/mouse.go @@ -0,0 +1,97 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventMouse is a mouse event. It is sent on either mouse up or mouse down +// events. It is also sent on mouse motion events - if the terminal supports +// it. We make every effort to ensure that mouse release events are delivered. +// Hence, click drag can be identified by a motion event with the mouse down, +// without any intervening button release. On some terminals only the initiating +// press and terminating release event will be delivered. +// +// Mouse wheel events, when reported, may appear on their own as individual +// impulses; that is, there will normally not be a release event delivered +// for mouse wheel movements. +// +// Most terminals cannot report the state of more than one button at a time -- +// and some cannot report motion events unless a button is pressed. +// +// Applications can inspect the time between events to resolve double or +// triple clicks. 
+type EventMouse struct { + t time.Time + btn ButtonMask + mod ModMask + x int + y int +} + +// When returns the time when this EventMouse was created. +func (ev *EventMouse) When() time.Time { + return ev.t +} + +// Buttons returns the list of buttons that were pressed or wheel motions. +func (ev *EventMouse) Buttons() ButtonMask { + return ev.btn +} + +// Modifiers returns a list of keyboard modifiers that were pressed +// with the mouse button(s). +func (ev *EventMouse) Modifiers() ModMask { + return ev.mod +} + +// Position returns the mouse position in character cells. The origin +// 0, 0 is at the upper left corner. +func (ev *EventMouse) Position() (int, int) { + return ev.x, ev.y +} + +// NewEventMouse is used to create a new mouse event. Applications +// shouldn't need to use this; its mostly for screen implementors. +func NewEventMouse(x, y int, btn ButtonMask, mod ModMask) *EventMouse { + return &EventMouse{t: time.Now(), x: x, y: y, btn: btn, mod: mod} +} + +// ButtonMask is a mask of mouse buttons and wheel events. Mouse button presses +// are normally delivered as both press and release events. Mouse wheel events +// are normally just single impulse events. Windows supports up to eight +// separate buttons plus all four wheel directions, but XTerm can only support +// mouse buttons 1-3 and wheel up/down. Its not unheard of for terminals +// to support only one or two buttons (think Macs). Old terminals, and true +// emulations (such as vt100) won't support mice at all, of course. +type ButtonMask int16 + +// These are the actual button values. +const ( + Button1 ButtonMask = 1 << iota // Usually left mouse button. + Button2 // Usually the middle mouse button. + Button3 // Usually the right mouse button. + Button4 // Often a side button (thumb/next). + Button5 // Often a side button (thumb/prev). + Button6 + Button7 + Button8 + WheelUp // Wheel motion up/away from user. + WheelDown // Wheel motion down/towards user. + WheelLeft // Wheel motion to left. 
+ WheelRight // Wheel motion to right. + ButtonNone ButtonMask = 0 // No button or wheel events. +) diff --git a/vendor/github.com/gdamore/tcell/resize.go b/vendor/github.com/gdamore/tcell/resize.go new file mode 100644 index 00000000000..0385673c838 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/resize.go @@ -0,0 +1,42 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventResize is sent when the window size changes. +type EventResize struct { + t time.Time + w int + h int +} + +// NewEventResize creates an EventResize with the new updated window size, +// which is given in character cells. +func NewEventResize(width, height int) *EventResize { + return &EventResize{t: time.Now(), w: width, h: height} +} + +// When returns the time when the Event was created. +func (ev *EventResize) When() time.Time { + return ev.t +} + +// Size returns the new window size as width, height in character cells. +func (ev *EventResize) Size() (int, int) { + return ev.w, ev.h +} diff --git a/vendor/github.com/gdamore/tcell/runes.go b/vendor/github.com/gdamore/tcell/runes.go new file mode 100644 index 00000000000..ed9c63b5c1c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/runes.go @@ -0,0 +1,111 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// The names of these constants are chosen to match Terminfo names, +// modulo case, and changing the prefix from ACS_ to Rune. These are +// the runes we provide extra special handling for, with ASCII fallbacks +// for terminals that lack them. +const ( + RuneSterling = '£' + RuneDArrow = '↓' + RuneLArrow = '←' + RuneRArrow = '→' + RuneUArrow = '↑' + RuneBullet = '·' + RuneBoard = '░' + RuneCkBoard = '▒' + RuneDegree = '°' + RuneDiamond = '◆' + RuneGEqual = '≥' + RunePi = 'π' + RuneHLine = '─' + RuneLantern = '§' + RunePlus = '┼' + RuneLEqual = '≤' + RuneLLCorner = '└' + RuneLRCorner = '┘' + RuneNEqual = '≠' + RunePlMinus = '±' + RuneS1 = '⎺' + RuneS3 = '⎻' + RuneS7 = '⎼' + RuneS9 = '⎽' + RuneBlock = '█' + RuneTTee = '┬' + RuneRTee = '┤' + RuneLTee = '├' + RuneBTee = '┴' + RuneULCorner = '┌' + RuneURCorner = '┐' + RuneVLine = '│' +) + +// RuneFallbacks is the default map of fallback strings that will be +// used to replace a rune when no other more appropriate transformation +// is available, and the rune cannot be displayed directly. +// +// New entries may be added to this map over time, as it becomes clear +// that such is desirable. Characters that represent either letters or +// numbers should not be added to this list unless it is certain that +// the meaning will still convey unambiguously. +// +// As an example, it would be appropriate to add an ASCII mapping for +// the full width form of the letter 'A', but it would not be appropriate +// to do so a glyph representing the country China. 
+// +// Programs that desire richer fallbacks may register additional ones, +// or change or even remove these mappings with Screen.RegisterRuneFallback +// Screen.UnregisterRuneFallback methods. +// +// Note that Unicode is presumed to be able to display all glyphs. +// This is a pretty poor assumption, but there is no easy way to +// figure out which glyphs are supported in a given font. Hence, +// some care in selecting the characters you support in your application +// is still appropriate. +var RuneFallbacks = map[rune]string{ + RuneSterling: "f", + RuneDArrow: "v", + RuneLArrow: "<", + RuneRArrow: ">", + RuneUArrow: "^", + RuneBullet: "o", + RuneBoard: "#", + RuneCkBoard: ":", + RuneDegree: "\\", + RuneDiamond: "+", + RuneGEqual: ">", + RunePi: "*", + RuneHLine: "-", + RuneLantern: "#", + RunePlus: "+", + RuneLEqual: "<", + RuneLLCorner: "+", + RuneLRCorner: "+", + RuneNEqual: "!", + RunePlMinus: "#", + RuneS1: "~", + RuneS3: "-", + RuneS7: "-", + RuneS9: "_", + RuneBlock: "#", + RuneTTee: "+", + RuneRTee: "+", + RuneLTee: "+", + RuneBTee: "+", + RuneULCorner: "+", + RuneURCorner: "+", + RuneVLine: "|", +} diff --git a/vendor/github.com/gdamore/tcell/screen.go b/vendor/github.com/gdamore/tcell/screen.go new file mode 100644 index 00000000000..9551af6d6e9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/screen.go @@ -0,0 +1,212 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +// Screen represents the physical (or emulated) screen. +// This can be a terminal window or a physical console. Platforms implement +// this differerently. +type Screen interface { + // Init initializes the screen for use. + Init() error + + // Fini finalizes the screen also releasing resources. + Fini() + + // Clear erases the screen. The contents of any screen buffers + // will also be cleared. This has the logical effect of + // filling the screen with spaces, using the global default style. + Clear() + + // Fill fills the screen with the given character and style. + Fill(rune, Style) + + // SetCell is an older API, and will be removed. Please use + // SetContent instead; SetCell is implemented in terms of SetContent. + SetCell(x int, y int, style Style, ch ...rune) + + // GetContent returns the contents at the given location. If the + // coordinates are out of range, then the values will be 0, nil, + // StyleDefault. Note that the contents returned are logical contents + // and may not actually be what is displayed, but rather are what will + // be displayed if Show() or Sync() is called. The width is the width + // in screen cells; most often this will be 1, but some East Asian + // characters require two cells. + GetContent(x, y int) (mainc rune, combc []rune, style Style, width int) + + // SetContent sets the contents of the given cell location. If + // the coordinates are out of range, then the operation is ignored. + // + // The first rune is the primary non-zero width rune. The array + // that follows is a possible list of combining characters to append, + // and will usually be nil (no combining characters.) + // + // The results are not displayd until Show() or Sync() is called. + // + // Note that wide (East Asian full width) runes occupy two cells, + // and attempts to place character at next cell to the right will have + // undefined effects. 
Wide runes that are printed in the + // last column will be replaced with a single width space on output. + SetContent(x int, y int, mainc rune, combc []rune, style Style) + + // SetStyle sets the default style to use when clearing the screen + // or when StyleDefault is specified. If it is also StyleDefault, + // then whatever system/terminal default is relevant will be used. + SetStyle(style Style) + + // ShowCursor is used to display the cursor at a given location. + // If the coordinates -1, -1 are given or are otherwise outside the + // dimensions of the screen, the cursor will be hidden. + ShowCursor(x int, y int) + + // HideCursor is used to hide the cursor. Its an alias for + // ShowCursor(-1, -1). + HideCursor() + + // Size returns the screen size as width, height. This changes in + // response to a call to Clear or Flush. + Size() (int, int) + + // PollEvent waits for events to arrive. Main application loops + // must spin on this to prevent the application from stalling. + // Furthermore, this will return nil if the Screen is finalized. + PollEvent() Event + + // PostEvent tries to post an event into the event stream. This + // can fail if the event queue is full. In that case, the event + // is dropped, and ErrEventQFull is returned. + PostEvent(ev Event) error + + // PostEventWait is like PostEvent, but if the queue is full, it + // blocks until there is space in the queue, making delivery + // reliable. However, it is VERY important that this function + // never be called from within whatever event loop is polling + // with PollEvent(), otherwise a deadlock may arise. + // + // For this reason, when using this function, the use of a + // Goroutine is recommended to ensure no deadlock can occur. + PostEventWait(ev Event) + + // EnableMouse enables the mouse. (If your terminal supports it.) + EnableMouse() + + // DisableMouse disables the mouse. + DisableMouse() + + // HasMouse returns true if the terminal (apparently) supports a + // mouse. 
Note that the a return value of true doesn't guarantee that + // a mouse/pointing device is present; a false return definitely + // indicates no mouse support is available. + HasMouse() bool + + // Colors returns the number of colors. All colors are assumed to + // use the ANSI color map. If a terminal is monochrome, it will + // return 0. + Colors() int + + // Show makes all the content changes made using SetContent() visible + // on the display. + // + // It does so in the most efficient and least visually disruptive + // manner possible. + Show() + + // Sync works like Show(), but it updates every visible cell on the + // physical display, assuming that it is not synchronized with any + // internal model. This may be both expensive and visually jarring, + // so it should only be used when believed to actually be necessary. + // + // Typically this is called as a result of a user-requested redraw + // (e.g. to clear up on screen corruption caused by some other program), + // or during a resize event. + Sync() + + // CharacterSet returns information about the character set. + // This isn't the full locale, but it does give us the input/output + // character set. Note that this is just for diagnostic purposes, + // we normally translate input/output to/from UTF-8, regardless of + // what the user's environment is. + CharacterSet() string + + // RegisterRuneFallback adds a fallback for runes that are not + // part of the character set -- for example one coudld register + // o as a fallback for ø. This should be done cautiously for + // characters that might be displayed ordinarily in language + // specific text -- characters that could change the meaning of + // of written text would be dangerous. The intention here is to + // facilitate fallback characters in pseudo-graphical applications. + // + // If the terminal has fallbacks already in place via an alternate + // character set, those are used in preference. 
Also, standard + // fallbacks for graphical characters in the ACSC terminfo string + // are registered implicitly. + + // The display string should be the same width as original rune. + // This makes it possible to register two character replacements + // for full width East Asian characters, for example. + // + // It is recommended that replacement strings consist only of + // 7-bit ASCII, since other characters may not display everywhere. + RegisterRuneFallback(r rune, subst string) + + // UnregisterRuneFallback unmaps a replacement. It will unmap + // the implicit ASCII replacements for alternate characters as well. + // When an unmapped char needs to be displayed, but no suitable + // glyph is available, '?' is emitted instead. It is not possible + // to "disable" the use of alternate characters that are supported + // by your terminal except by changing the terminal database. + UnregisterRuneFallback(r rune) + + // CanDisplay returns true if the given rune can be displayed on + // this screen. Note that this is a best guess effort -- whether + // your fonts support the character or not may be questionable. + // Mostly this is for folks who work outside of Unicode. + // + // If checkFallbacks is true, then if any (possibly imperfect) + // fallbacks are registered, this will return true. This will + // also return true if the terminal can replace the glyph with + // one that is visually indistinguishable from the one requested. + CanDisplay(r rune, checkFallbacks bool) bool + + // Resize does nothing, since its generally not possible to + // ask a screen to resize, but it allows the Screen to implement + // the View interface. + Resize(int, int, int, int) + + // HasKey returns true if the keyboard is believed to have the + // key. In some cases a keyboard may have keys with this name + // but no support for them, while in others a key may be reported + // as supported but not actually be usable (such as some emulators + // that hijack certain keys). 
Its best not to depend to strictly + // on this function, but it can be used for hinting when building + // menus, displayed hot-keys, etc. Note that KeyRune (literal + // runes) is always true. + HasKey(Key) bool +} + +// NewScreen returns a default Screen suitable for the user's terminal +// environment. +func NewScreen() (Screen, error) { + // First we attempt to obtain a terminfo screen. This should work + // in most places if $TERM is set. + if s, e := NewTerminfoScreen(); s != nil { + return s, nil + + } else if s, _ := NewConsoleScreen(); s != nil { + return s, nil + + } else { + return nil, e + } +} diff --git a/vendor/github.com/gdamore/tcell/simulation.go b/vendor/github.com/gdamore/tcell/simulation.go new file mode 100644 index 00000000000..850a7b3dc8e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/simulation.go @@ -0,0 +1,508 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "sync" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// NewSimulationScreen returns a SimulationScreen. Note that +// SimulationScreen is also a Screen. +func NewSimulationScreen(charset string) SimulationScreen { + if charset == "" { + charset = "UTF-8" + } + s := &simscreen{charset: charset} + return s +} + +// SimulationScreen represents a screen simulation. This is intended to +// be a superset of normal Screens, but also adds some important interfaces +// for testing. 
+type SimulationScreen interface { + // InjectKeyBytes injects a stream of bytes corresponding to + // the native encoding (see charset). It turns true if the entire + // set of bytes were processed and delivered as KeyEvents, false + // if any bytes were not fully understood. Any bytes that are not + // fully converted are discarded. + InjectKeyBytes(buf []byte) bool + + // InjectKey injects a key event. The rune is a UTF-8 rune, post + // any translation. + InjectKey(key Key, r rune, mod ModMask) + + // InjectMouse injects a mouse event. + InjectMouse(x, y int, buttons ButtonMask, mod ModMask) + + // SetSize resizes the underlying physical screen. It also causes + // a resize event to be injected during the next Show() or Sync(). + // A new physical contents array will be allocated (with data from + // the old copied), so any prior value obtained with GetContents + // won't be used anymore + SetSize(width, height int) + + // GetContents returns screen contents as an array of + // cells, along with the physical width & height. Note that the + // physical contents will be used until the next time SetSize() + // is called. + GetContents() (cells []SimCell, width int, height int) + + // GetCursor returns the cursor details. + GetCursor() (x int, y int, visible bool) + + Screen +} + +// SimCell represents a simulated screen cell. The purpose of this +// is to track on screen content. +type SimCell struct { + // Bytes is the actual character bytes. Normally this is + // rune data, but it could be be data in another encoding system. + Bytes []byte + + // Style is the style used to display the data. + Style Style + + // Runes is the list of runes, unadulterated, in UTF-8. 
+ Runes []rune +} + +type simscreen struct { + physw int + physh int + fini bool + style Style + evch chan Event + quit chan struct{} + + front []SimCell + back CellBuffer + clear bool + cursorx int + cursory int + cursorvis bool + mouse bool + charset string + encoder transform.Transformer + decoder transform.Transformer + fillchar rune + fillstyle Style + fallback map[rune]string + + sync.Mutex +} + +func (s *simscreen) Init() error { + s.evch = make(chan Event, 10) + s.quit = make(chan struct{}) + s.fillchar = 'X' + s.fillstyle = StyleDefault + s.mouse = false + s.physw = 80 + s.physh = 25 + s.cursorx = -1 + s.cursory = -1 + s.style = StyleDefault + + if enc := GetEncoding(s.charset); enc != nil { + s.encoder = enc.NewEncoder() + s.decoder = enc.NewDecoder() + } else { + return ErrNoCharset + } + + s.front = make([]SimCell, s.physw*s.physh) + s.back.Resize(80, 25) + + // default fallbacks + s.fallback = make(map[rune]string) + for k, v := range RuneFallbacks { + s.fallback[k] = v + } + return nil +} + +func (s *simscreen) Fini() { + s.Lock() + s.fini = true + s.back.Resize(0, 0) + s.Unlock() + if s.quit != nil { + close(s.quit) + } + s.physw = 0 + s.physh = 0 + s.front = nil +} + +func (s *simscreen) SetStyle(style Style) { + s.Lock() + s.style = style + s.Unlock() +} + +func (s *simscreen) Clear() { + s.Fill(' ', s.style) +} + +func (s *simscreen) Fill(r rune, style Style) { + s.Lock() + s.back.Fill(r, style) + s.Unlock() +} + +func (s *simscreen) SetCell(x, y int, style Style, ch ...rune) { + + if len(ch) > 0 { + s.SetContent(x, y, ch[0], ch[1:], style) + } else { + s.SetContent(x, y, ' ', nil, style) + } +} + +func (s *simscreen) SetContent(x, y int, mainc rune, combc []rune, st Style) { + + s.Lock() + s.back.SetContent(x, y, mainc, combc, st) + s.Unlock() +} + +func (s *simscreen) GetContent(x, y int) (rune, []rune, Style, int) { + var mainc rune + var combc []rune + var style Style + var width int + s.Lock() + mainc, combc, style, width = 
s.back.GetContent(x, y) + s.Unlock() + return mainc, combc, style, width +} + +func (s *simscreen) drawCell(x, y int) int { + + mainc, combc, style, width := s.back.GetContent(x, y) + if !s.back.Dirty(x, y) { + return width + } + if x >= s.physw || y >= s.physh || x < 0 || y < 0 { + return width + } + simc := &s.front[(y*s.physw)+x] + + if style == StyleDefault { + style = s.style + } + simc.Style = style + simc.Runes = append([]rune{mainc}, combc...) + + // now emit runes - taking care to not overrun width with a + // wide character, and to ensure that we emit exactly one regular + // character followed up by any residual combing characters + + simc.Bytes = nil + + if x > s.physw-width { + simc.Runes = []rune{' '} + simc.Bytes = []byte{' '} + return width + } + + lbuf := make([]byte, 12) + ubuf := make([]byte, 12) + nout := 0 + + for _, r := range simc.Runes { + + l := utf8.EncodeRune(ubuf, r) + + nout, _, _ = s.encoder.Transform(lbuf, ubuf[:l], true) + + if nout == 0 || lbuf[0] == '\x1a' { + + // skip combining + + if subst, ok := s.fallback[r]; ok { + simc.Bytes = append(simc.Bytes, + []byte(subst)...) + + } else if r >= ' ' && r <= '~' { + simc.Bytes = append(simc.Bytes, byte(r)) + + } else if simc.Bytes == nil { + simc.Bytes = append(simc.Bytes, '?') + } + } else { + simc.Bytes = append(simc.Bytes, lbuf[:nout]...) 
+ } + } + s.back.SetDirty(x, y, false) + return width +} + +func (s *simscreen) ShowCursor(x, y int) { + s.Lock() + s.cursorx, s.cursory = x, y + s.showCursor() + s.Unlock() +} + +func (s *simscreen) HideCursor() { + s.ShowCursor(-1, -1) +} + +func (s *simscreen) showCursor() { + + x, y := s.cursorx, s.cursory + if x < 0 || y < 0 || x >= s.physw || y >= s.physh { + s.cursorvis = false + } else { + s.cursorvis = true + } +} + +func (s *simscreen) hideCursor() { + // does not update cursor position + s.cursorvis = false +} + +func (s *simscreen) Show() { + s.Lock() + s.resize() + s.draw() + s.Unlock() +} + +func (s *simscreen) clearScreen() { + // We emulate a hardware clear by filling with a specific pattern + for i := range s.front { + s.front[i].Style = s.fillstyle + s.front[i].Runes = []rune{s.fillchar} + s.front[i].Bytes = []byte{byte(s.fillchar)} + } + s.clear = false +} + +func (s *simscreen) draw() { + s.hideCursor() + if s.clear { + s.clearScreen() + } + + w, h := s.back.Size() + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + width := s.drawCell(x, y) + x += width - 1 + } + } + s.showCursor() +} + +func (s *simscreen) EnableMouse() { + s.mouse = true +} + +func (s *simscreen) DisableMouse() { + s.mouse = false +} + +func (s *simscreen) Size() (int, int) { + s.Lock() + w, h := s.back.Size() + s.Unlock() + return w, h +} + +func (s *simscreen) resize() { + w, h := s.physw, s.physh + ow, oh := s.back.Size() + if w != ow || h != oh { + s.back.Resize(w, h) + ev := NewEventResize(w, h) + s.PostEvent(ev) + } +} + +func (s *simscreen) Colors() int { + return 256 +} + +func (s *simscreen) PollEvent() Event { + select { + case <-s.quit: + return nil + case ev := <-s.evch: + return ev + } +} + +func (s *simscreen) PostEventWait(ev Event) { + s.evch <- ev +} + +func (s *simscreen) PostEvent(ev Event) error { + select { + case s.evch <- ev: + return nil + default: + return ErrEventQFull + } +} + +func (s *simscreen) InjectMouse(x, y int, buttons ButtonMask, mod 
ModMask) { + ev := NewEventMouse(x, y, buttons, mod) + s.PostEvent(ev) +} + +func (s *simscreen) InjectKey(key Key, r rune, mod ModMask) { + ev := NewEventKey(key, r, mod) + s.PostEvent(ev) +} + +func (s *simscreen) InjectKeyBytes(b []byte) bool { + failed := false + +outer: + for len(b) > 0 { + if b[0] >= ' ' && b[0] <= 0x7F { + // printable ASCII easy to deal with -- no encodings + ev := NewEventKey(KeyRune, rune(b[0]), ModNone) + s.PostEvent(ev) + b = b[1:] + continue + } + + if b[0] < 0x80 { + mod := ModNone + // No encodings start with low numbered values + if Key(b[0]) >= KeyCtrlA && Key(b[0]) <= KeyCtrlZ { + mod = ModCtrl + } + ev := NewEventKey(Key(b[0]), 0, mod) + s.PostEvent(ev) + continue + } + + utfb := make([]byte, len(b)*4) // worst case + for l := 1; l < len(b); l++ { + s.decoder.Reset() + nout, nin, _ := s.decoder.Transform(utfb, b[:l], true) + + if nout != 0 { + r, _ := utf8.DecodeRune(utfb[:nout]) + if r != utf8.RuneError { + ev := NewEventKey(KeyRune, r, ModNone) + s.PostEvent(ev) + } + b = b[nin:] + continue outer + } + } + failed = true + b = b[1:] + continue + } + + return !failed +} + +func (s *simscreen) Sync() { + s.Lock() + s.clear = true + s.resize() + s.back.Invalidate() + s.draw() + s.Unlock() +} + +func (s *simscreen) CharacterSet() string { + return s.charset +} + +func (s *simscreen) SetSize(w, h int) { + s.Lock() + newc := make([]SimCell, w*h) + for row := 0; row < h && row < s.physh; row++ { + for col := 0; col < w && col < s.physw; col++ { + newc[(row*w)+col] = s.front[(row*s.physw)+col] + } + } + s.cursorx, s.cursory = -1, -1 + s.physw, s.physh = w, h + s.front = newc + s.back.Resize(w, h) + s.Unlock() +} + +func (s *simscreen) GetContents() ([]SimCell, int, int) { + s.Lock() + cells, w, h := s.front, s.physw, s.physh + s.Unlock() + return cells, w, h +} + +func (s *simscreen) GetCursor() (int, int, bool) { + s.Lock() + x, y, vis := s.cursorx, s.cursory, s.cursorvis + s.Unlock() + return x, y, vis +} + +func (s *simscreen) 
RegisterRuneFallback(r rune, subst string) { + s.Lock() + s.fallback[r] = subst + s.Unlock() +} + +func (s *simscreen) UnregisterRuneFallback(r rune) { + s.Lock() + delete(s.fallback, r) + s.Unlock() +} + +func (s *simscreen) CanDisplay(r rune, checkFallbacks bool) bool { + + if enc := s.encoder; enc != nil { + nb := make([]byte, 6) + ob := make([]byte, 6) + num := utf8.EncodeRune(ob, r) + + enc.Reset() + dst, _, err := enc.Transform(nb, ob[:num], true) + if dst != 0 && err == nil && nb[0] != '\x1A' { + return true + } + } + if !checkFallbacks { + return false + } + if _, ok := s.fallback[r]; ok { + return true + } + return false +} + +func (s *simscreen) HasMouse() bool { + return false +} + +func (s *simscreen) Resize(int, int, int, int) {} + +func (s *simscreen) HasKey(Key) bool { + return true +} diff --git a/vendor/github.com/gdamore/tcell/style.go b/vendor/github.com/gdamore/tcell/style.go new file mode 100644 index 00000000000..c4ee93511e5 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/style.go @@ -0,0 +1,126 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// Style represents a complete text style, including both foreground +// and background color. We encode it in a 64-bit int for efficiency. +// The coding is (MSB): <7b flags><1b><24b fgcolor><7b attr><1b><24b bgcolor>. +// The <1b> is set true to indicate that the color is an RGB color, rather +// than a named index. 
+// +// This gives 24bit color options, if it ever becomes truly necessary. +// However, applications must not rely on this encoding. +// +// Note that not all terminals can display all colors or attributes, and +// many might have specific incompatibilities between specific attributes +// and color combinations. +// +// The intention is to extend styles to support paletting, in which case +// some flag bit(s) would be set, and the foreground and background colors +// would be replaced with a palette number and palette index. +// +// To use Style, just declare a variable of its type. +type Style int64 + +// StyleDefault represents a default style, based upon the context. +// It is the zero value. +const StyleDefault Style = 0 + +// styleFlags -- used internally for now. +const ( + styleBgSet = 1 << (iota + 57) + styleFgSet + stylePalette +) + +// Foreground returns a new style based on s, with the foreground color set +// as requested. ColorDefault can be used to select the global default. +func (s Style) Foreground(c Color) Style { + if c == ColorDefault { + return (s &^ (0x1ffffff00000000 | styleFgSet)) + } + return (s &^ Style(0x1ffffff00000000)) | + ((Style(c) & 0x1ffffff) << 32) | styleFgSet +} + +// Background returns a new style based on s, with the background color set +// as requested. ColorDefault can be used to select the global default. +func (s Style) Background(c Color) Style { + if c == ColorDefault { + return (s &^ (0x1ffffff | styleBgSet)) + } + return (s &^ (0x1ffffff)) | (Style(c) & 0x1ffffff) | styleBgSet +} + +// Decompose breaks a style up, returning the foreground, background, +// and other attributes. 
+func (s Style) Decompose() (fg Color, bg Color, attr AttrMask) { + if s&styleFgSet != 0 { + fg = Color(s>>32) & 0x1ffffff + } else { + fg = ColorDefault + } + if s&styleBgSet != 0 { + bg = Color(s & 0x1ffffff) + } else { + bg = ColorDefault + } + attr = AttrMask(s) & attrAll + + return fg, bg, attr +} + +func (s Style) setAttrs(attrs Style, on bool) Style { + if on { + return s | attrs + } + return s &^ attrs +} + +// Normal returns the style with all attributes disabled. +func (s Style) Normal() Style { + return s &^ Style(attrAll) +} + +// Bold returns a new style based on s, with the bold attribute set +// as requested. +func (s Style) Bold(on bool) Style { + return s.setAttrs(Style(AttrBold), on) +} + +// Blink returns a new style based on s, with the blink attribute set +// as requested. +func (s Style) Blink(on bool) Style { + return s.setAttrs(Style(AttrBlink), on) +} + +// Dim returns a new style based on s, with the dim attribute set +// as requested. +func (s Style) Dim(on bool) Style { + return s.setAttrs(Style(AttrDim), on) +} + +// Reverse returns a new style based on s, with the reverse attribute set +// as requested. (Reverse usually changes the foreground and background +// colors.) +func (s Style) Reverse(on bool) Style { + return s.setAttrs(Style(AttrReverse), on) +} + +// Underline returns a new style based on s, with the underline attribute set +// as requested. +func (s Style) Underline(on bool) Style { + return s.setAttrs(Style(AttrUnderline), on) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/mkinfo.go b/vendor/github.com/gdamore/tcell/terminfo/mkinfo.go new file mode 100644 index 00000000000..50d70f74ee9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/mkinfo.go @@ -0,0 +1,915 @@ +// +build ignore + +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This command is used to generate suitable configuration files in either +// go syntax or in JSON. It defaults to JSON output on stdout. If no +// term values are specified on the command line, then $TERM is used. +// +// Usage is like this: +// +// mkinfo [-init] [-go file.go] [-json file.json] [-quiet] [-nofatal] [...] +// +// -all scan terminfo to determine database entries to use +// -db generate database entries (database/*), implied for -all +// -gzip specifies output should be compressed (json only) +// -go specifies Go output into the named file. Use - for stdout. +// -json specifies JSON output in the named file. Use - for stdout +// -nofatal indicates that errors loading definitions should not be fatal +// + +package main + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha1" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "os" + "os/exec" + "path" + "regexp" + "strconv" + "strings" + + "github.com/gdamore/tcell/terminfo" +) + +type termcap struct { + name string + desc string + aliases []string + bools map[string]bool + nums map[string]int + strs map[string]string +} + +func (tc *termcap) getnum(s string) int { + return (tc.nums[s]) +} + +func (tc *termcap) getflag(s string) bool { + return (tc.bools[s]) +} + +func (tc *termcap) getstr(s string) string { + return (tc.strs[s]) +} + +const ( + NONE = iota + CTRL + ESC +) + +var notaddressable = errors.New("terminal not cursor addressable") + +func unescape(s string) string { + // Various escapes are in \x format. 
Control codes are + // encoded as ^M (carat followed by ASCII equivalent). + // Escapes are: \e, \E - escape + // \0 NULL, \n \l \r \t \b \f \s for equivalent C escape. + buf := &bytes.Buffer{} + esc := NONE + + for i := 0; i < len(s); i++ { + c := s[i] + switch esc { + case NONE: + switch c { + case '\\': + esc = ESC + case '^': + esc = CTRL + default: + buf.WriteByte(c) + } + case CTRL: + buf.WriteByte(c - 0x40) + esc = NONE + case ESC: + switch c { + case 'E', 'e': + buf.WriteByte(0x1b) + case '0', '1', '2', '3', '4', '5', '6', '7': + if i+2 < len(s) && s[i+1] >= '0' && s[i+1] <= '7' && s[i+2] >= '0' && s[i+2] <= '7' { + buf.WriteByte(((c - '0') * 64) + ((s[i+1] - '0') * 8) + (s[i+2] - '0')) + i = i + 2 + } else if c == '0' { + buf.WriteByte(0) + } + case 'n': + buf.WriteByte('\n') + case 'r': + buf.WriteByte('\r') + case 't': + buf.WriteByte('\t') + case 'b': + buf.WriteByte('\b') + case 'f': + buf.WriteByte('\f') + case 's': + buf.WriteByte(' ') + case 'l': + panic("WTF: weird format: " + s) + default: + buf.WriteByte(c) + } + esc = NONE + } + } + return (buf.String()) +} + +func getallterms() ([]string, error) { + out := []string{} + cmd := exec.Command("toe", "-a") + output := &bytes.Buffer{} + cmd.Stdout = output + err := cmd.Run() + if err != nil { + return nil, err + } + lines := strings.Split(output.String(), "\n") + for _, l := range lines { + fields := strings.Fields(l) + if len(fields) > 0 { + out = append(out, fields[0]) + } + } + return out, nil +} + +func (tc *termcap) setupterm(name string) error { + cmd := exec.Command("infocmp", "-1", name) + output := &bytes.Buffer{} + cmd.Stdout = output + + tc.strs = make(map[string]string) + tc.bools = make(map[string]bool) + tc.nums = make(map[string]int) + + err := cmd.Run() + if err != nil { + return err + } + + // Now parse the output. 
+ // We get comment lines (starting with "#"), followed by + // a header line that looks like "||...|" + // then capabilities, one per line, starting with a tab and ending + // with a comma and newline. + lines := strings.Split(output.String(), "\n") + for len(lines) > 0 && strings.HasPrefix(lines[0], "#") { + lines = lines[1:] + } + + // Ditch trailing empty last line + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + header := lines[0] + if strings.HasSuffix(header, ",") { + header = header[:len(header)-1] + } + names := strings.Split(header, "|") + tc.name = names[0] + names = names[1:] + if len(names) > 0 { + tc.desc = names[len(names)-1] + names = names[:len(names)-1] + } + tc.aliases = names + for _, val := range lines[1:] { + if (!strings.HasPrefix(val, "\t")) || + (!strings.HasSuffix(val, ",")) { + return (errors.New("malformed infocmp: " + val)) + } + + val = val[1:] + val = val[:len(val)-1] + + if k := strings.SplitN(val, "=", 2); len(k) == 2 { + tc.strs[k[0]] = unescape(k[1]) + } else if k := strings.SplitN(val, "#", 2); len(k) == 2 { + if u, err := strconv.ParseUint(k[1], 0, 0); err != nil { + return (err) + } else { + tc.nums[k[0]] = int(u) + } + } else { + tc.bools[val] = true + } + } + return nil +} + +// This program is used to collect data from the system's terminfo library, +// and write it into Go source code. That is, we maintain our terminfo +// capabilities encoded in the program. It should never need to be run by +// an end user, but developers can use this to add codes for additional +// terminal types. +// +// If a terminal name ending with -truecolor is given, and we cannot find +// one, we will try to fabricate one from either the -256color (if present) +// or the unadorned base name, adding the XTerm specific 24-bit color +// escapes. We believe that all 24-bit capable terminals use the same +// escape sequences, and terminfo has yet to evolve to support this. 
+func getinfo(name string) (*terminfo.Terminfo, string, error) { + var tc termcap + addTrueColor := false + if err := tc.setupterm(name); err != nil { + if strings.HasSuffix(name, "-truecolor") { + base := name[:len(name)-len("-truecolor")] + // Probably -256color is closest to what we want + if err = tc.setupterm(base + "-256color"); err != nil { + err = tc.setupterm(base) + } + if err == nil { + addTrueColor = true + } + tc.name = name + } + if err != nil { + return nil, "", err + } + } + t := &terminfo.Terminfo{} + // If this is an alias record, then just emit the alias + t.Name = tc.name + if t.Name != name { + return t, "", nil + } + t.Aliases = tc.aliases + t.Colors = tc.getnum("colors") + t.Columns = tc.getnum("cols") + t.Lines = tc.getnum("lines") + t.Bell = tc.getstr("bel") + t.Clear = tc.getstr("clear") + t.EnterCA = tc.getstr("smcup") + t.ExitCA = tc.getstr("rmcup") + t.ShowCursor = tc.getstr("cnorm") + t.HideCursor = tc.getstr("civis") + t.AttrOff = tc.getstr("sgr0") + t.Underline = tc.getstr("smul") + t.Bold = tc.getstr("bold") + t.Blink = tc.getstr("blink") + t.Dim = tc.getstr("dim") + t.Reverse = tc.getstr("rev") + t.EnterKeypad = tc.getstr("smkx") + t.ExitKeypad = tc.getstr("rmkx") + t.SetFg = tc.getstr("setaf") + t.SetBg = tc.getstr("setab") + t.SetCursor = tc.getstr("cup") + t.CursorBack1 = tc.getstr("cub1") + t.CursorUp1 = tc.getstr("cuu1") + t.KeyF1 = tc.getstr("kf1") + t.KeyF2 = tc.getstr("kf2") + t.KeyF3 = tc.getstr("kf3") + t.KeyF4 = tc.getstr("kf4") + t.KeyF5 = tc.getstr("kf5") + t.KeyF6 = tc.getstr("kf6") + t.KeyF7 = tc.getstr("kf7") + t.KeyF8 = tc.getstr("kf8") + t.KeyF9 = tc.getstr("kf9") + t.KeyF10 = tc.getstr("kf10") + t.KeyF11 = tc.getstr("kf11") + t.KeyF12 = tc.getstr("kf12") + t.KeyF13 = tc.getstr("kf13") + t.KeyF14 = tc.getstr("kf14") + t.KeyF15 = tc.getstr("kf15") + t.KeyF16 = tc.getstr("kf16") + t.KeyF17 = tc.getstr("kf17") + t.KeyF18 = tc.getstr("kf18") + t.KeyF19 = tc.getstr("kf19") + t.KeyF20 = tc.getstr("kf20") + t.KeyF21 = 
tc.getstr("kf21") + t.KeyF22 = tc.getstr("kf22") + t.KeyF23 = tc.getstr("kf23") + t.KeyF24 = tc.getstr("kf24") + t.KeyF25 = tc.getstr("kf25") + t.KeyF26 = tc.getstr("kf26") + t.KeyF27 = tc.getstr("kf27") + t.KeyF28 = tc.getstr("kf28") + t.KeyF29 = tc.getstr("kf29") + t.KeyF30 = tc.getstr("kf30") + t.KeyF31 = tc.getstr("kf31") + t.KeyF32 = tc.getstr("kf32") + t.KeyF33 = tc.getstr("kf33") + t.KeyF34 = tc.getstr("kf34") + t.KeyF35 = tc.getstr("kf35") + t.KeyF36 = tc.getstr("kf36") + t.KeyF37 = tc.getstr("kf37") + t.KeyF38 = tc.getstr("kf38") + t.KeyF39 = tc.getstr("kf39") + t.KeyF40 = tc.getstr("kf40") + t.KeyF41 = tc.getstr("kf41") + t.KeyF42 = tc.getstr("kf42") + t.KeyF43 = tc.getstr("kf43") + t.KeyF44 = tc.getstr("kf44") + t.KeyF45 = tc.getstr("kf45") + t.KeyF46 = tc.getstr("kf46") + t.KeyF47 = tc.getstr("kf47") + t.KeyF48 = tc.getstr("kf48") + t.KeyF49 = tc.getstr("kf49") + t.KeyF50 = tc.getstr("kf50") + t.KeyF51 = tc.getstr("kf51") + t.KeyF52 = tc.getstr("kf52") + t.KeyF53 = tc.getstr("kf53") + t.KeyF54 = tc.getstr("kf54") + t.KeyF55 = tc.getstr("kf55") + t.KeyF56 = tc.getstr("kf56") + t.KeyF57 = tc.getstr("kf57") + t.KeyF58 = tc.getstr("kf58") + t.KeyF59 = tc.getstr("kf59") + t.KeyF60 = tc.getstr("kf60") + t.KeyF61 = tc.getstr("kf61") + t.KeyF62 = tc.getstr("kf62") + t.KeyF63 = tc.getstr("kf63") + t.KeyF64 = tc.getstr("kf64") + t.KeyInsert = tc.getstr("kich1") + t.KeyDelete = tc.getstr("kdch1") + t.KeyBackspace = tc.getstr("kbs") + t.KeyHome = tc.getstr("khome") + t.KeyEnd = tc.getstr("kend") + t.KeyUp = tc.getstr("kcuu1") + t.KeyDown = tc.getstr("kcud1") + t.KeyRight = tc.getstr("kcuf1") + t.KeyLeft = tc.getstr("kcub1") + t.KeyPgDn = tc.getstr("knp") + t.KeyPgUp = tc.getstr("kpp") + t.KeyBacktab = tc.getstr("kcbt") + t.KeyExit = tc.getstr("kext") + t.KeyCancel = tc.getstr("kcan") + t.KeyPrint = tc.getstr("kprt") + t.KeyHelp = tc.getstr("khlp") + t.KeyClear = tc.getstr("kclr") + t.AltChars = tc.getstr("acsc") + t.EnterAcs = tc.getstr("smacs") + t.ExitAcs = 
tc.getstr("rmacs") + t.EnableAcs = tc.getstr("enacs") + t.Mouse = tc.getstr("kmous") + t.KeyShfRight = tc.getstr("kRIT") + t.KeyShfLeft = tc.getstr("kLFT") + t.KeyShfHome = tc.getstr("kHOM") + t.KeyShfEnd = tc.getstr("kEND") + + // Terminfo lacks descriptions for a bunch of modified keys, + // but modern XTerm and emulators often have them. Let's add them, + // if the shifted right and left arrows are defined. + if t.KeyShfRight == "\x1b[1;2C" && t.KeyShfLeft == "\x1b[1;2D" { + t.KeyShfUp = "\x1b[1;2A" + t.KeyShfDown = "\x1b[1;2B" + t.KeyMetaUp = "\x1b[1;9A" + t.KeyMetaDown = "\x1b[1;9B" + t.KeyMetaRight = "\x1b[1;9C" + t.KeyMetaLeft = "\x1b[1;9D" + t.KeyAltUp = "\x1b[1;3A" + t.KeyAltDown = "\x1b[1;3B" + t.KeyAltRight = "\x1b[1;3C" + t.KeyAltLeft = "\x1b[1;3D" + t.KeyCtrlUp = "\x1b[1;5A" + t.KeyCtrlDown = "\x1b[1;5B" + t.KeyCtrlRight = "\x1b[1;5C" + t.KeyCtrlLeft = "\x1b[1;5D" + t.KeyAltShfUp = "\x1b[1;4A" + t.KeyAltShfDown = "\x1b[1;4B" + t.KeyAltShfRight = "\x1b[1;4C" + t.KeyAltShfLeft = "\x1b[1;4D" + + t.KeyMetaShfUp = "\x1b[1;10A" + t.KeyMetaShfDown = "\x1b[1;10B" + t.KeyMetaShfRight = "\x1b[1;10C" + t.KeyMetaShfLeft = "\x1b[1;10D" + + t.KeyCtrlShfUp = "\x1b[1;6A" + t.KeyCtrlShfDown = "\x1b[1;6B" + t.KeyCtrlShfRight = "\x1b[1;6C" + t.KeyCtrlShfLeft = "\x1b[1;6D" + } + // And also for Home and End + if t.KeyShfHome == "\x1b[1;2H" && t.KeyShfEnd == "\x1b[1;2F" { + t.KeyCtrlHome = "\x1b[1;5H" + t.KeyCtrlEnd = "\x1b[1;5F" + t.KeyAltHome = "\x1b[1;9H" + t.KeyAltEnd = "\x1b[1;9F" + t.KeyCtrlShfHome = "\x1b[1;6H" + t.KeyCtrlShfEnd = "\x1b[1;6F" + t.KeyAltShfHome = "\x1b[1;4H" + t.KeyAltShfEnd = "\x1b[1;4F" + t.KeyMetaShfHome = "\x1b[1;10H" + t.KeyMetaShfEnd = "\x1b[1;10F" + } + + // And the same thing for rxvt and workalikes (Eterm, aterm, etc.) + // It seems that urxvt at least send ESC as ALT prefix for these, + // although some places seem to indicate a separate ALT key sesquence. 
+ if t.KeyShfRight == "\x1b[c" && t.KeyShfLeft == "\x1b[d" { + t.KeyShfUp = "\x1b[a" + t.KeyShfDown = "\x1b[b" + t.KeyCtrlUp = "\x1b[Oa" + t.KeyCtrlDown = "\x1b[Ob" + t.KeyCtrlRight = "\x1b[Oc" + t.KeyCtrlLeft = "\x1b[Od" + } + if t.KeyShfHome == "\x1b[7$" && t.KeyShfEnd == "\x1b[8$" { + t.KeyCtrlHome = "\x1b[7^" + t.KeyCtrlEnd = "\x1b[8^" + } + + // If the kmous entry is present, then we need to record the + // the codes to enter and exit mouse mode. Sadly, this is not + // part of the terminfo databases anywhere that I've found, but + // is an extension. The escape codes are documented in the XTerm + // manual, and all terminals that have kmous are expected to + // use these same codes, unless explicitly configured otherwise + // vi XM. Note that in any event, we only known how to parse either + // x11 or SGR mouse events -- if your terminal doesn't support one + // of these two forms, you maybe out of luck. + t.MouseMode = tc.getstr("XM") + if t.Mouse != "" && t.MouseMode == "" { + // we anticipate that all xterm mouse tracking compatible + // terminals understand mouse tracking (1000), but we hope + // that those that don't understand any-event tracking (1003) + // will at least ignore it. Likewise we hope that terminals + // that don't understand SGR reporting (1006) just ignore it. + t.MouseMode = "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;" + + "\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c" + } + + // We only support colors in ANSI 8 or 256 color mode. + if t.Colors < 8 || t.SetFg == "" { + t.Colors = 0 + } + if t.SetCursor == "" { + return nil, "", notaddressable + } + + // For padding, we lookup the pad char. If that isn't present, + // and npc is *not* set, then we assume a null byte. + t.PadChar = tc.getstr("pad") + if t.PadChar == "" { + if !tc.getflag("npc") { + t.PadChar = "\u0000" + } + } + + // For some terminals we fabricate a -truecolor entry, that may + // not exist in terminfo. 
+ if addTrueColor { + t.SetFgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%dm" + t.SetBgRGB = "\x1b[48;2;%p1%d;%p2%d;%p3%dm" + t.SetFgBgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%d;" + + "48;2;%p4%d;%p5%d;%p6%dm" + } + + // For terminals that use "standard" SGR sequences, lets combine the + // foreground and background together. + if strings.HasPrefix(t.SetFg, "\x1b[") && + strings.HasPrefix(t.SetBg, "\x1b[") && + strings.HasSuffix(t.SetFg, "m") && + strings.HasSuffix(t.SetBg, "m") { + fg := t.SetFg[:len(t.SetFg)-1] + r := regexp.MustCompile("%p1") + bg := r.ReplaceAllString(t.SetBg[2:], "%p2") + t.SetFgBg = fg + ";" + bg + } + + return t, tc.desc, nil +} + +func dotGoAddInt(w io.Writer, n string, i int) { + if i == 0 { + // initialized to 0, ignore + return + } + fmt.Fprintf(w, "\t\t%-13s %d,\n", n+":", i) +} +func dotGoAddStr(w io.Writer, n string, s string) { + if s == "" { + return + } + fmt.Fprintf(w, "\t\t%-13s %q,\n", n+":", s) +} + +func dotGoAddArr(w io.Writer, n string, a []string) { + if len(a) == 0 { + return + } + fmt.Fprintf(w, "\t\t%-13s []string{", n+":") + did := false + for _, b := range a { + if did { + fmt.Fprint(w, ", ") + } + did = true + fmt.Fprintf(w, "%q", b) + } + fmt.Fprintln(w, "},") +} + +func dotGoHeader(w io.Writer, packname string) { + fmt.Fprintln(w, "// Generated automatically. 
DO NOT HAND-EDIT.") + fmt.Fprintln(w, "") + fmt.Fprintf(w, "package %s\n", packname) + fmt.Fprintln(w, "") +} + +func dotGoTrailer(w io.Writer) { +} + +func dotGoInfo(w io.Writer, t *terminfo.Terminfo, desc string) { + + fmt.Fprintln(w, "") + fmt.Fprintln(w, "func init() {") + fmt.Fprintf(w, "\t// %s\n", desc) + fmt.Fprintln(w, "\tAddTerminfo(&Terminfo{") + dotGoAddStr(w, "Name", t.Name) + dotGoAddArr(w, "Aliases", t.Aliases) + dotGoAddInt(w, "Columns", t.Columns) + dotGoAddInt(w, "Lines", t.Lines) + dotGoAddInt(w, "Colors", t.Colors) + dotGoAddStr(w, "Bell", t.Bell) + dotGoAddStr(w, "Clear", t.Clear) + dotGoAddStr(w, "EnterCA", t.EnterCA) + dotGoAddStr(w, "ExitCA", t.ExitCA) + dotGoAddStr(w, "ShowCursor", t.ShowCursor) + dotGoAddStr(w, "HideCursor", t.HideCursor) + dotGoAddStr(w, "AttrOff", t.AttrOff) + dotGoAddStr(w, "Underline", t.Underline) + dotGoAddStr(w, "Bold", t.Bold) + dotGoAddStr(w, "Dim", t.Dim) + dotGoAddStr(w, "Blink", t.Blink) + dotGoAddStr(w, "Reverse", t.Reverse) + dotGoAddStr(w, "EnterKeypad", t.EnterKeypad) + dotGoAddStr(w, "ExitKeypad", t.ExitKeypad) + dotGoAddStr(w, "SetFg", t.SetFg) + dotGoAddStr(w, "SetBg", t.SetBg) + dotGoAddStr(w, "SetFgBg", t.SetFgBg) + dotGoAddStr(w, "PadChar", t.PadChar) + dotGoAddStr(w, "AltChars", t.AltChars) + dotGoAddStr(w, "EnterAcs", t.EnterAcs) + dotGoAddStr(w, "ExitAcs", t.ExitAcs) + dotGoAddStr(w, "EnableAcs", t.EnableAcs) + dotGoAddStr(w, "SetFgRGB", t.SetFgRGB) + dotGoAddStr(w, "SetBgRGB", t.SetBgRGB) + dotGoAddStr(w, "SetFgBgRGB", t.SetFgBgRGB) + dotGoAddStr(w, "Mouse", t.Mouse) + dotGoAddStr(w, "MouseMode", t.MouseMode) + dotGoAddStr(w, "SetCursor", t.SetCursor) + dotGoAddStr(w, "CursorBack1", t.CursorBack1) + dotGoAddStr(w, "CursorUp1", t.CursorUp1) + dotGoAddStr(w, "KeyUp", t.KeyUp) + dotGoAddStr(w, "KeyDown", t.KeyDown) + dotGoAddStr(w, "KeyRight", t.KeyRight) + dotGoAddStr(w, "KeyLeft", t.KeyLeft) + dotGoAddStr(w, "KeyInsert", t.KeyInsert) + dotGoAddStr(w, "KeyDelete", t.KeyDelete) + dotGoAddStr(w, 
"KeyBackspace", t.KeyBackspace) + dotGoAddStr(w, "KeyHome", t.KeyHome) + dotGoAddStr(w, "KeyEnd", t.KeyEnd) + dotGoAddStr(w, "KeyPgUp", t.KeyPgUp) + dotGoAddStr(w, "KeyPgDn", t.KeyPgDn) + dotGoAddStr(w, "KeyF1", t.KeyF1) + dotGoAddStr(w, "KeyF2", t.KeyF2) + dotGoAddStr(w, "KeyF3", t.KeyF3) + dotGoAddStr(w, "KeyF4", t.KeyF4) + dotGoAddStr(w, "KeyF5", t.KeyF5) + dotGoAddStr(w, "KeyF6", t.KeyF6) + dotGoAddStr(w, "KeyF7", t.KeyF7) + dotGoAddStr(w, "KeyF8", t.KeyF8) + dotGoAddStr(w, "KeyF9", t.KeyF9) + dotGoAddStr(w, "KeyF10", t.KeyF10) + dotGoAddStr(w, "KeyF11", t.KeyF11) + dotGoAddStr(w, "KeyF12", t.KeyF12) + dotGoAddStr(w, "KeyF13", t.KeyF13) + dotGoAddStr(w, "KeyF14", t.KeyF14) + dotGoAddStr(w, "KeyF15", t.KeyF15) + dotGoAddStr(w, "KeyF16", t.KeyF16) + dotGoAddStr(w, "KeyF17", t.KeyF17) + dotGoAddStr(w, "KeyF18", t.KeyF18) + dotGoAddStr(w, "KeyF19", t.KeyF19) + dotGoAddStr(w, "KeyF20", t.KeyF20) + dotGoAddStr(w, "KeyF21", t.KeyF21) + dotGoAddStr(w, "KeyF22", t.KeyF22) + dotGoAddStr(w, "KeyF23", t.KeyF23) + dotGoAddStr(w, "KeyF24", t.KeyF24) + dotGoAddStr(w, "KeyF25", t.KeyF25) + dotGoAddStr(w, "KeyF26", t.KeyF26) + dotGoAddStr(w, "KeyF27", t.KeyF27) + dotGoAddStr(w, "KeyF28", t.KeyF28) + dotGoAddStr(w, "KeyF29", t.KeyF29) + dotGoAddStr(w, "KeyF30", t.KeyF30) + dotGoAddStr(w, "KeyF31", t.KeyF31) + dotGoAddStr(w, "KeyF32", t.KeyF32) + dotGoAddStr(w, "KeyF33", t.KeyF33) + dotGoAddStr(w, "KeyF34", t.KeyF34) + dotGoAddStr(w, "KeyF35", t.KeyF35) + dotGoAddStr(w, "KeyF36", t.KeyF36) + dotGoAddStr(w, "KeyF37", t.KeyF37) + dotGoAddStr(w, "KeyF38", t.KeyF38) + dotGoAddStr(w, "KeyF39", t.KeyF39) + dotGoAddStr(w, "KeyF40", t.KeyF40) + dotGoAddStr(w, "KeyF41", t.KeyF41) + dotGoAddStr(w, "KeyF42", t.KeyF42) + dotGoAddStr(w, "KeyF43", t.KeyF43) + dotGoAddStr(w, "KeyF44", t.KeyF44) + dotGoAddStr(w, "KeyF45", t.KeyF45) + dotGoAddStr(w, "KeyF46", t.KeyF46) + dotGoAddStr(w, "KeyF47", t.KeyF47) + dotGoAddStr(w, "KeyF48", t.KeyF48) + dotGoAddStr(w, "KeyF49", t.KeyF49) + dotGoAddStr(w, 
"KeyF50", t.KeyF50) + dotGoAddStr(w, "KeyF51", t.KeyF51) + dotGoAddStr(w, "KeyF52", t.KeyF52) + dotGoAddStr(w, "KeyF53", t.KeyF53) + dotGoAddStr(w, "KeyF54", t.KeyF54) + dotGoAddStr(w, "KeyF55", t.KeyF55) + dotGoAddStr(w, "KeyF56", t.KeyF56) + dotGoAddStr(w, "KeyF57", t.KeyF57) + dotGoAddStr(w, "KeyF58", t.KeyF58) + dotGoAddStr(w, "KeyF59", t.KeyF59) + dotGoAddStr(w, "KeyF60", t.KeyF60) + dotGoAddStr(w, "KeyF61", t.KeyF61) + dotGoAddStr(w, "KeyF62", t.KeyF62) + dotGoAddStr(w, "KeyF63", t.KeyF63) + dotGoAddStr(w, "KeyF64", t.KeyF64) + dotGoAddStr(w, "KeyCancel", t.KeyCancel) + dotGoAddStr(w, "KeyPrint", t.KeyPrint) + dotGoAddStr(w, "KeyExit", t.KeyExit) + dotGoAddStr(w, "KeyHelp", t.KeyHelp) + dotGoAddStr(w, "KeyClear", t.KeyClear) + dotGoAddStr(w, "KeyBacktab", t.KeyBacktab) + dotGoAddStr(w, "KeyShfLeft", t.KeyShfLeft) + dotGoAddStr(w, "KeyShfRight", t.KeyShfRight) + dotGoAddStr(w, "KeyShfUp", t.KeyShfUp) + dotGoAddStr(w, "KeyShfDown", t.KeyShfDown) + dotGoAddStr(w, "KeyCtrlLeft", t.KeyCtrlLeft) + dotGoAddStr(w, "KeyCtrlRight", t.KeyCtrlRight) + dotGoAddStr(w, "KeyCtrlUp", t.KeyCtrlUp) + dotGoAddStr(w, "KeyCtrlDown", t.KeyCtrlDown) + dotGoAddStr(w, "KeyMetaLeft", t.KeyMetaLeft) + dotGoAddStr(w, "KeyMetaRight", t.KeyMetaRight) + dotGoAddStr(w, "KeyMetaUp", t.KeyMetaUp) + dotGoAddStr(w, "KeyMetaDown", t.KeyMetaDown) + dotGoAddStr(w, "KeyAltLeft", t.KeyAltLeft) + dotGoAddStr(w, "KeyAltRight", t.KeyAltRight) + dotGoAddStr(w, "KeyAltUp", t.KeyAltUp) + dotGoAddStr(w, "KeyAltDown", t.KeyAltDown) + dotGoAddStr(w, "KeyAltShfLeft", t.KeyAltShfLeft) + dotGoAddStr(w, "KeyAltShfRight", t.KeyAltShfRight) + dotGoAddStr(w, "KeyAltShfUp", t.KeyAltShfUp) + dotGoAddStr(w, "KeyAltShfDown", t.KeyAltShfDown) + dotGoAddStr(w, "KeyMetaShfLeft", t.KeyMetaShfLeft) + dotGoAddStr(w, "KeyMetaShfRight", t.KeyMetaShfRight) + dotGoAddStr(w, "KeyMetaShfUp", t.KeyMetaShfUp) + dotGoAddStr(w, "KeyMetaShfDown", t.KeyMetaShfDown) + dotGoAddStr(w, "KeyCtrlShfLeft", t.KeyCtrlShfLeft) + dotGoAddStr(w, 
"KeyCtrlShfRight", t.KeyCtrlShfRight) + dotGoAddStr(w, "KeyCtrlShfUp", t.KeyCtrlShfUp) + dotGoAddStr(w, "KeyCtrlShfDown", t.KeyCtrlShfDown) + dotGoAddStr(w, "KeyShfHome", t.KeyShfHome) + dotGoAddStr(w, "KeyShfEnd", t.KeyShfEnd) + dotGoAddStr(w, "KeyCtrlHome", t.KeyCtrlHome) + dotGoAddStr(w, "KeyCtrlEnd", t.KeyCtrlEnd) + dotGoAddStr(w, "KeyMetaHome", t.KeyMetaHome) + dotGoAddStr(w, "KeyMetaEnd", t.KeyMetaEnd) + dotGoAddStr(w, "KeyAltHome", t.KeyAltHome) + dotGoAddStr(w, "KeyAltEnd", t.KeyAltEnd) + dotGoAddStr(w, "KeyCtrlShfHome", t.KeyCtrlShfHome) + dotGoAddStr(w, "KeyCtrlShfEnd", t.KeyCtrlShfEnd) + dotGoAddStr(w, "KeyMetaShfHome", t.KeyMetaShfHome) + dotGoAddStr(w, "KeyMetaShfEnd", t.KeyMetaShfEnd) + dotGoAddStr(w, "KeyAltShfHome", t.KeyAltShfHome) + dotGoAddStr(w, "KeyAltShfEnd", t.KeyAltShfEnd) + fmt.Fprintln(w, "\t})") + fmt.Fprintln(w, "}") +} + +var packname = "terminfo" + +func dotGoFile(fname string, term *terminfo.Terminfo, desc string, makeDir bool) error { + w := os.Stdout + var e error + if fname != "-" && fname != "" { + if makeDir { + dname := path.Dir(fname) + _ = os.Mkdir(dname, 0777) + } + if w, e = os.Create(fname); e != nil { + return e + } + } + dotGoHeader(w, packname) + dotGoInfo(w, term, desc) + dotGoTrailer(w) + if w != os.Stdout { + w.Close() + } + cmd := exec.Command("go", "fmt", fname) + cmd.Run() + return nil +} + +func dotGzFile(fname string, term *terminfo.Terminfo, makeDir bool) error { + + var w io.WriteCloser = os.Stdout + var e error + if fname != "-" && fname != "" { + if makeDir { + dname := path.Dir(fname) + _ = os.Mkdir(dname, 0777) + } + if w, e = os.Create(fname); e != nil { + return e + } + } + + w = gzip.NewWriter(w) + + js, e := json.Marshal(term) + fmt.Fprintln(w, string(js)) + + if w != os.Stdout { + w.Close() + } + return nil +} + +func jsonFile(fname string, term *terminfo.Terminfo, makeDir bool) error { + w := os.Stdout + var e error + if fname != "-" && fname != "" { + if makeDir { + dname := path.Dir(fname) + _ = 
os.Mkdir(dname, 0777) + } + if w, e = os.Create(fname); e != nil { + return e + } + } + + js, e := json.Marshal(term) + fmt.Fprintln(w, string(js)) + + if w != os.Stdout { + w.Close() + } + return nil +} + +func dumpDatabase(terms map[string]*terminfo.Terminfo, descs map[string]string) { + + // Load models .text + mfile, e := os.Open("models.txt") + models := make(map[string]bool) + if e != nil { + fmt.Fprintf(os.Stderr, "Failed reading models.txt: %v", e) + } + scanner := bufio.NewScanner(mfile) + for scanner.Scan() { + models[scanner.Text()] = true + } + + for name, t := range terms { + + // If this is one of our builtin models, generate the GO file + if models[name] { + desc := descs[name] + safename := strings.Replace(name, "-", "_", -1) + goname := fmt.Sprintf("term_%s.go", safename) + e = dotGoFile(goname, t, desc, true) + if e != nil { + fmt.Fprintf(os.Stderr, "Failed creating %s: %v", goname, e) + os.Exit(1) + } + continue + } + + hash := fmt.Sprintf("%x", sha1.Sum([]byte(name))) + fname := fmt.Sprintf("%s.gz", hash[0:8]) + fname = path.Join("database", hash[0:2], fname) + e = dotGzFile(fname, t, true) + if e != nil { + fmt.Fprintf(os.Stderr, "Failed creating %s: %v", fname, e) + os.Exit(1) + } + + for _, a := range t.Aliases { + hash = fmt.Sprintf("%x", sha1.Sum([]byte(a))) + fname = path.Join("database", hash[0:2], hash[0:8]) + e = jsonFile(fname, &terminfo.Terminfo{Name: t.Name}, true) + if e != nil { + fmt.Fprintf(os.Stderr, "Failed creating %s: %v", fname, e) + os.Exit(1) + } + } + } +} + +func main() { + gofile := "" + jsonfile := "" + nofatal := false + quiet := false + dogzip := false + all := false + db := false + + flag.StringVar(&gofile, "go", "", "generate go source in named file") + flag.StringVar(&jsonfile, "json", "", "generate json in named file") + flag.StringVar(&packname, "P", packname, "package name (go source)") + flag.BoolVar(&nofatal, "nofatal", false, "errors are not fatal") + flag.BoolVar(&quiet, "quiet", false, "suppress error 
messages") + flag.BoolVar(&dogzip, "gzip", false, "compress json output") + flag.BoolVar(&all, "all", false, "load all terminals from terminfo") + flag.BoolVar(&db, "db", false, "generate json db file in place") + flag.Parse() + var e error + + args := flag.Args() + if all { + db = true // implied + allterms, e := getallterms() + if e != nil { + fmt.Fprintf(os.Stderr, "Failed: %v", e) + os.Exit(1) + } + args = append(args, allterms...) + } + if len(args) == 0 { + args = []string{os.Getenv("TERM")} + } + + tdata := make(map[string]*terminfo.Terminfo) + descs := make(map[string]string) + + for _, term := range args { + if t, desc, e := getinfo(term); e != nil { + if all && e == notaddressable { + continue + } + if !quiet { + fmt.Fprintf(os.Stderr, + "Failed loading %s: %v\n", term, e) + } + if !nofatal { + os.Exit(1) + } + } else { + tdata[term] = t + descs[term] = desc + } + } + + if len(tdata) == 0 { + // No data. + os.Exit(0) + } + + if db { + dumpDatabase(tdata, descs) + } else if gofile != "" { + for term, t := range tdata { + if t.Name == term { + e = dotGoFile(gofile, t, descs[term], false) + if e != nil { + fmt.Fprintf(os.Stderr, "Failed %s: %v", gofile, e) + os.Exit(1) + } + } + } + + } else { + for _, t := range tdata { + if dogzip { + if e = dotGzFile(jsonfile, t, false); e != nil { + fmt.Fprintf(os.Stderr, "Failed %s: %v", gofile, e) + os.Exit(1) + } + } else { + if e = jsonFile(jsonfile, t, false); e != nil { + fmt.Fprintf(os.Stderr, "Failed %s: %v", gofile, e) + os.Exit(1) + } + } + } + } +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_adm3a.go b/vendor/github.com/gdamore/tcell/terminfo/term_adm3a.go new file mode 100644 index 00000000000..d8709dacea8 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_adm3a.go @@ -0,0 +1,22 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // lsi adm3a + AddTerminfo(&Terminfo{ + Name: "adm3a", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1a$<1/>", + PadChar: "\x00", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_aixterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_aixterm.go new file mode 100644 index 00000000000..e1d2146a095 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_aixterm.go @@ -0,0 +1,76 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // IBM Aixterm Terminal Emulator + AddTerminfo(&Terminfo{ + Name: "aixterm", + Columns: 80, + Lines: 25, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "jjkkllmmnnqqttuuvvwwxx", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[139q", + KeyDelete: "\x1b[P", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyEnd: "\x1b[146q", + KeyPgUp: "\x1b[150q", + KeyPgDn: "\x1b[154q", + KeyF1: "\x1b[001q", + KeyF2: "\x1b[002q", + KeyF3: "\x1b[003q", + KeyF4: "\x1b[004q", + KeyF5: "\x1b[005q", + KeyF6: "\x1b[006q", + KeyF7: "\x1b[007q", + KeyF8: "\x1b[008q", + KeyF9: "\x1b[009q", + KeyF10: "\x1b[010q", + KeyF11: "\x1b[011q", + KeyF12: "\x1b[012q", + KeyF13: "\x1b[013q", + KeyF14: "\x1b[014q", + KeyF15: "\x1b[015q", + KeyF16: "\x1b[016q", + KeyF17: "\x1b[017q", + KeyF18: "\x1b[018q", + KeyF19: "\x1b[019q", + KeyF20: "\x1b[020q", + KeyF21: "\x1b[021q", + KeyF22: "\x1b[022q", + KeyF23: "\x1b[023q", + KeyF24: "\x1b[024q", + KeyF25: "\x1b[025q", + KeyF26: 
"\x1b[026q", + KeyF27: "\x1b[027q", + KeyF28: "\x1b[028q", + KeyF29: "\x1b[029q", + KeyF30: "\x1b[030q", + KeyF31: "\x1b[031q", + KeyF32: "\x1b[032q", + KeyF33: "\x1b[033q", + KeyF34: "\x1b[034q", + KeyF35: "\x1b[035q", + KeyF36: "\x1b[036q", + KeyClear: "\x1b[144q", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_ansi.go b/vendor/github.com/gdamore/tcell/terminfo/term_ansi.go new file mode 100644 index 00000000000..a7909931fb4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_ansi.go @@ -0,0 +1,38 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // ansi/pc-term compatible with color + AddTerminfo(&Terminfo{ + Name: "ansi", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[11m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\x1b[D", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[L", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_aterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_aterm.go new file mode 100644 index 00000000000..77177a22e5d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_aterm.go @@ -0,0 +1,107 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // AfterStep terminal + AddTerminfo(&Terminfo{ + Name: "aterm", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + KeyF39: "\x1b[31^", + KeyF40: 
"\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_beterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_beterm.go new file mode 100644 index 00000000000..de1b5d9fe97 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_beterm.go @@ -0,0 +1,51 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // BeOS Terminal + AddTerminfo(&Terminfo{ + Name: "beterm", + Columns: 80, + Lines: 25, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?4h", + ExitKeypad: "\x1b[?4l", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[16~", + KeyF7: "\x1b[17~", + KeyF8: "\x1b[18~", + KeyF9: "\x1b[19~", + KeyF10: "\x1b[20~", + KeyF11: "\x1b[21~", + KeyF12: "\x1b[22~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_bsdos_pc.go b/vendor/github.com/gdamore/tcell/terminfo/term_bsdos_pc.go new file mode 100644 index 00000000000..c0561261a13 --- /dev/null +++ 
b/vendor/github.com/gdamore/tcell/terminfo/term_bsdos_pc.go @@ -0,0 +1,39 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // IBM PC BSD/OS Console + AddTerminfo(&Terminfo{ + Name: "bsdos-pc", + Columns: 80, + Lines: 25, + Colors: 8, + Bell: "\a", + Clear: "\x1bc", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[11m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[L", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyPgUp: "\x1b[I", + KeyPgDn: "\x1b[G", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_cygwin.go b/vendor/github.com/gdamore/tcell/terminfo/term_cygwin.go new file mode 100644 index 00000000000..568bbe14f1c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_cygwin.go @@ -0,0 +1,60 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // ansi emulation for Cygwin + AddTerminfo(&Terminfo{ + Name: "cygwin", + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[11m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[[A", + KeyF2: "\x1b[[B", + KeyF3: "\x1b[[C", + KeyF4: "\x1b[[D", + KeyF5: "\x1b[[E", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_d200.go b/vendor/github.com/gdamore/tcell/terminfo/term_d200.go new file mode 100644 index 00000000000..611f6ee8578 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_d200.go @@ -0,0 +1,94 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // Data General DASHER D200 + AddTerminfo(&Terminfo{ + Name: "d200", + Aliases: []string{"d200-dg"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\f", + AttrOff: "\x0f\x15\x1d\x1eE", + Underline: "\x14", + Bold: "\x1eD\x14", + Dim: "\x1c", + Blink: "\x0e", + Reverse: "\x1eD", + PadChar: "\x00", + SetCursor: "\x10%p2%c%p1%c", + CursorBack1: "\x19", + CursorUp1: "\x17", + KeyUp: "\x17", + KeyDown: "\x1a", + KeyRight: "\x18", + KeyLeft: "\x19", + KeyHome: "\b", + KeyF1: "\x1eq", + KeyF2: "\x1er", + KeyF3: "\x1es", + KeyF4: "\x1et", + KeyF5: "\x1eu", + KeyF6: "\x1ev", + KeyF7: "\x1ew", + KeyF8: "\x1ex", + KeyF9: "\x1ey", + KeyF10: "\x1ez", + KeyF11: "\x1e{", + KeyF12: "\x1e|", + KeyF13: "\x1e}", + KeyF14: "\x1e~", + KeyF15: "\x1ep", + KeyF16: "\x1ea", + KeyF17: "\x1eb", + KeyF18: "\x1ec", + KeyF19: "\x1ed", + KeyF20: "\x1ee", + KeyF21: "\x1ef", + KeyF22: "\x1eg", + KeyF23: "\x1eh", + KeyF24: "\x1ei", + KeyF25: "\x1ej", + KeyF26: "\x1ek", + KeyF27: "\x1el", + KeyF28: "\x1em", + KeyF29: "\x1en", + KeyF30: "\x1e`", + KeyF31: "\x1e1", + KeyF32: "\x1e2", + KeyF33: "\x1e3", + KeyF34: "\x1e4", + KeyF35: "\x1e5", + KeyF36: "\x1e6", + KeyF37: "\x1e7", + KeyF38: "\x1e8", + KeyF39: "\x1e9", + KeyF40: "\x1e:", + KeyF41: "\x1e;", + KeyF42: "\x1e<", + KeyF43: "\x1e=", + KeyF44: "\x1e>", + KeyF45: "\x1e0", + KeyF46: "\x1e!", + KeyF47: "\x1e\"", + KeyF48: "\x1e#", + KeyF49: "\x1e$", + KeyF50: "\x1e%%", + KeyF51: "\x1e&", + KeyF52: "\x1e'", + KeyF53: "\x1e(", + KeyF54: "\x1e)", + KeyF55: "\x1e*", + KeyF56: "\x1e+", + KeyF57: "\x1e,", + KeyF58: "\x1e-", + KeyF59: "\x1e.", + KeyF60: "\x1e ", + KeyClear: "\f", + KeyShfLeft: "\x1e\x19", + KeyShfRight: "\x1e\x18", + KeyShfHome: "\x1e\b", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_d210.go b/vendor/github.com/gdamore/tcell/terminfo/term_d210.go new file mode 100644 index 00000000000..87e0662d09a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_d210.go @@ 
-0,0 +1,92 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // Data General DASHER D210 series + AddTerminfo(&Terminfo{ + Name: "d210", + Aliases: []string{"d214"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[2J", + AttrOff: "\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[4;7m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyHome: "\x1b[H", + KeyF1: "\x1b[001z", + KeyF2: "\x1b[002z", + KeyF3: "\x1b[003z", + KeyF4: "\x1b[004z", + KeyF5: "\x1b[005z", + KeyF6: "\x1b[006z", + KeyF7: "\x1b[007z", + KeyF8: "\x1b[008z", + KeyF9: "\x1b[009z", + KeyF10: "\x1b[010z", + KeyF11: "\x1b[011z", + KeyF12: "\x1b[012z", + KeyF13: "\x1b[013z", + KeyF14: "\x1b[014z", + KeyF15: "\x1b[000z", + KeyF16: "\x1b[101z", + KeyF17: "\x1b[102z", + KeyF18: "\x1b[103z", + KeyF19: "\x1b[104z", + KeyF20: "\x1b[105z", + KeyF21: "\x1b[106z", + KeyF22: "\x1b[107z", + KeyF23: "\x1b[108z", + KeyF24: "\x1b[109z", + KeyF25: "\x1b[110z", + KeyF26: "\x1b[111z", + KeyF27: "\x1b[112z", + KeyF28: "\x1b[113z", + KeyF29: "\x1b[114z", + KeyF30: "\x1b[100z", + KeyF31: "\x1b[201z", + KeyF32: "\x1b[202z", + KeyF33: "\x1b[203z", + KeyF34: "\x1b[204z", + KeyF35: "\x1b[205z", + KeyF36: "\x1b[206z", + KeyF37: "\x1b[207z", + KeyF38: "\x1b[208z", + KeyF39: "\x1b[209z", + KeyF40: "\x1b[210z", + KeyF41: "\x1b[211z", + KeyF42: "\x1b[212z", + KeyF43: "\x1b[213z", + KeyF44: "\x1b[214z", + KeyF45: "\x1b[200z", + KeyF46: "\x1b[301z", + KeyF47: "\x1b[302z", + KeyF48: "\x1b[303z", + KeyF49: "\x1b[304z", + KeyF50: "\x1b[305z", + KeyF51: "\x1b[306z", + KeyF52: "\x1b[307z", + KeyF53: "\x1b[308z", + KeyF54: "\x1b[309z", + KeyF55: "\x1b[310z", + KeyF56: "\x1b[311z", + KeyF57: "\x1b[312z", + KeyF58: "\x1b[313z", + KeyF59: "\x1b[314z", + KeyF60: "\x1b[300z", + KeyPrint: "\x1b[i", + 
KeyClear: "\x1b[2J", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_dtterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_dtterm.go new file mode 100644 index 00000000000..9c563c50a1b --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_dtterm.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // CDE desktop terminal + AddTerminfo(&Terminfo{ + Name: "dtterm", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyHelp: "\x1b[28~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_gnome.go b/vendor/github.com/gdamore/tcell/terminfo/term_gnome.go new file mode 100644 index 00000000000..d7907f4cdc9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_gnome.go @@ -0,0 +1,154 @@ +// Generated automatically. 
DO NOT HAND-EDIT. + +package terminfo + +func init() { + // GNOME Terminal + AddTerminfo(&Terminfo{ + Name: "gnome", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1bO1;2P", + KeyF14: "\x1bO1;2Q", + KeyF15: "\x1bO1;2R", + KeyF16: "\x1bO1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1bO1;5P", + KeyF26: "\x1bO1;5Q", + KeyF27: "\x1bO1;5R", + KeyF28: "\x1bO1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: 
"\x1bO1;6P", + KeyF38: "\x1bO1;6Q", + KeyF39: "\x1bO1;6R", + KeyF40: "\x1bO1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1bO1;3P", + KeyF50: "\x1bO1;3Q", + KeyF51: "\x1bO1;3R", + KeyF52: "\x1bO1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1bO1;4P", + KeyF62: "\x1bO1;4Q", + KeyF63: "\x1bO1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_gnome_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_gnome_256color.go new file mode 100644 index 
00000000000..342699d4365 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_gnome_256color.go @@ -0,0 +1,154 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // GNOME Terminal with xterm 256-colors + AddTerminfo(&Terminfo{ + Name: "gnome-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: 
"\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + 
KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_hpterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_hpterm.go new file mode 100644 index 00000000000..0fa30bc6bf0 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_hpterm.go @@ -0,0 +1,47 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // hp X11 terminal emulator + AddTerminfo(&Terminfo{ + Name: "hpterm", + Aliases: []string{"X-hpterm"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b&a0y0C\x1bJ", + AttrOff: "\x1b&d@", + Underline: "\x1b&dD", + Bold: "\x1b&dB", + Dim: "\x1b&dH", + Reverse: "\x1b&dB", + EnterKeypad: "\x1b&s1A", + ExitKeypad: "\x1b&s0A", + PadChar: "\x00", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + SetCursor: "\x1b&a%p1%dy%p2%dC", + CursorBack1: "\b", + CursorUp1: "\x1bA", + KeyUp: "\x1bA", + KeyDown: "\x1bB", + KeyRight: "\x1bC", + KeyLeft: "\x1bD", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bP", + KeyBackspace: "\b", + KeyHome: "\x1bh", + KeyPgUp: "\x1bV", + KeyPgDn: "\x1bU", + KeyF1: "\x1bp", + KeyF2: "\x1bq", + KeyF3: "\x1br", + KeyF4: "\x1bs", + KeyF5: "\x1bt", + KeyF6: "\x1bu", + KeyF7: "\x1bv", + KeyF8: "\x1bw", + KeyClear: "\x1bJ", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_hz1500.go b/vendor/github.com/gdamore/tcell/terminfo/term_hz1500.go new file mode 100644 index 00000000000..34ef6efafd9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_hz1500.go @@ -0,0 +1,23 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // hazeltine 1500 + AddTerminfo(&Terminfo{ + Name: "hz1500", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "~\x1c", + PadChar: "\x00", + SetCursor: "~\x11%p2%p2%?%{30}%>%t%' '%+%;%'`'%+%c%p1%'`'%+%c", + CursorBack1: "\b", + CursorUp1: "~\f", + KeyUp: "~\f", + KeyDown: "\n", + KeyRight: "\x10", + KeyLeft: "\b", + KeyHome: "~\x12", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_konsole.go b/vendor/github.com/gdamore/tcell/terminfo/term_konsole.go new file mode 100644 index 00000000000..5c4e4ec44e3 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_konsole.go @@ -0,0 +1,112 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // KDE console window + AddTerminfo(&Terminfo{ + Name: "konsole", + Columns: 80, + Lines: 24, + Colors: 8, + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + 
KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1bO2P", + KeyF14: "\x1bO2Q", + KeyF15: "\x1bO2R", + KeyF16: "\x1bO2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1bO5P", + KeyF26: "\x1bO5Q", + KeyF27: "\x1bO5R", + KeyF28: "\x1bO5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1bO6P", + KeyF38: "\x1bO6Q", + KeyF39: "\x1bO6R", + KeyF40: "\x1bO6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1bO3P", + KeyF50: "\x1bO3Q", + KeyF51: "\x1bO3R", + KeyF52: "\x1bO3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1bO4P", + KeyF62: "\x1bO4Q", + KeyF63: "\x1bO4R", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_konsole_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_konsole_256color.go new file mode 100644 index 00000000000..cbe0314d02f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_konsole_256color.go @@ -0,0 +1,112 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // KDE console window with xterm 256-colors + AddTerminfo(&Terminfo{ + Name: "konsole-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1bO2P", + KeyF14: "\x1bO2Q", + KeyF15: "\x1bO2R", + KeyF16: "\x1bO2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1bO5P", + KeyF26: "\x1bO5Q", + KeyF27: "\x1bO5R", + KeyF28: "\x1bO5S", + KeyF29: 
"\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1bO6P", + KeyF38: "\x1bO6Q", + KeyF39: "\x1bO6R", + KeyF40: "\x1bO6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1bO3P", + KeyF50: "\x1bO3Q", + KeyF51: "\x1bO3R", + KeyF52: "\x1bO3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1bO4P", + KeyF62: "\x1bO4Q", + KeyF63: "\x1bO4R", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_kterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_kterm.go new file mode 100644 index 00000000000..a7ec2f1037c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_kterm.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // kterm kanji terminal emulator (X window system) + AddTerminfo(&Terminfo{ + Name: "kterm", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aajjkkllmmnnooppqqrrssttuuvvwwxx~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_linux.go b/vendor/github.com/gdamore/tcell/terminfo/term_linux.go new file mode 100644 index 00000000000..a3d18720bf3 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_linux.go @@ -0,0 +1,66 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // linux console + AddTerminfo(&Terminfo{ + Name: "linux", + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + ShowCursor: "\x1b[?25h\x1b[?0c", + HideCursor: "\x1b[?25l\x1b[?1c", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "++,,--..00__``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}c~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[[A", + KeyF2: "\x1b[[B", + KeyF3: "\x1b[[C", + KeyF4: "\x1b[[D", + KeyF5: "\x1b[[E", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_pcansi.go b/vendor/github.com/gdamore/tcell/terminfo/term_pcansi.go new file mode 100644 index 00000000000..270ee459b0a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_pcansi.go @@ -0,0 +1,36 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // ibm-pc terminal programs claiming to be ansi + AddTerminfo(&Terminfo{ + Name: "pcansi", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[12m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\x1b[D", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_rxvt.go b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt.go new file mode 100644 index 00000000000..9ce8e805f84 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt.go @@ -0,0 +1,107 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // rxvt terminal emulator (X Window System) + AddTerminfo(&Terminfo{ + Name: "rxvt", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + 
KeyF39: "\x1b[31^", + KeyF40: "\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_256color.go new file mode 100644 index 00000000000..d2cd3108c9b --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_256color.go @@ -0,0 +1,107 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // rxvt 2.7.9 with xterm 256-colors + AddTerminfo(&Terminfo{ + Name: "rxvt-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: 
"\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + KeyF39: "\x1b[31^", + KeyF40: "\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode.go b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode.go new file mode 100644 index 00000000000..3aebf6b58fc --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode.go @@ -0,0 +1,81 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // rxvt-unicode terminal (X Window System) + AddTerminfo(&Terminfo{ + Name: "rxvt-unicode", + Columns: 80, + Lines: 24, + Colors: 88, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[r\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[38;5;%p1%dm", + SetBg: "\x1b[48;5;%p1%dm", + SetFgBg: "\x1b[38;5;%p1%d;48;5;%p2%dm", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + }) +} diff --git 
a/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode_256color.go new file mode 100644 index 00000000000..276f580847f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_rxvt_unicode_256color.go @@ -0,0 +1,81 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // rxvt-unicode terminal with 256 colors (X Window System) + AddTerminfo(&Terminfo{ + Name: "rxvt-unicode-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[r\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[38;5;%p1%dm", + SetBg: "\x1b[48;5;%p1%dm", + SetFgBg: "\x1b[38;5;%p1%d;48;5;%p2%dm", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: "\x1b[Z", 
+ KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_screen.go b/vendor/github.com/gdamore/tcell/terminfo/term_screen.go new file mode 100644 index 00000000000..d9dca02b622 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_screen.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // VT 100/ANSI X3.64 virtual terminal + AddTerminfo(&Terminfo{ + Name: "screen", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: 
"\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_screen_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_screen_256color.go new file mode 100644 index 00000000000..40fda2269f6 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_screen_256color.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // GNU Screen with 256 colors + AddTerminfo(&Terminfo{ + Name: "screen-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + PadChar: "\x00", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + 
KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st.go b/vendor/github.com/gdamore/tcell/terminfo/term_st.go new file mode 100644 index 00000000000..272af39c9d2 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st.go @@ -0,0 +1,156 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // simpleterm 0.4.1 + AddTerminfo(&Terminfo{ + Name: "st", + Aliases: []string{"stterm"}, + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: 
"\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: 
"\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_st_256color.go new file mode 100644 index 00000000000..9b4256f3a3f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st_256color.go @@ -0,0 +1,156 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // simpleterm with 256 colors + AddTerminfo(&Terminfo{ + Name: "st-256color", + Aliases: []string{"stterm-256color"}, + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: 
"\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: 
"\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st_meta.go b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta.go new file mode 100644 index 00000000000..1fd5f144fad --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta.go @@ -0,0 +1,155 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // simpleterm with meta key + AddTerminfo(&Terminfo{ + Name: "st-meta", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: 
"\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_256color.go new file mode 
100644 index 00000000000..5f181b2ab8c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_256color.go @@ -0,0 +1,155 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // simpleterm with meta key and 256 colors + AddTerminfo(&Terminfo{ + Name: "st-meta-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + 
KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + 
KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_truecolor.go b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_truecolor.go new file mode 100644 index 00000000000..fc53a97f8ea --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st_meta_truecolor.go @@ -0,0 +1,158 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // simpleterm with meta key and 256 colors + AddTerminfo(&Terminfo{ + Name: "st-meta-truecolor", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + SetFgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%dm", + SetBgRGB: "\x1b[48;2;%p1%d;%p2%d;%p3%dm", + SetFgBgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%d;48;2;%p4%d;%p5%d;%p6%dm", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + 
KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "177", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: 
"\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_st_truecolor.go b/vendor/github.com/gdamore/tcell/terminfo/term_st_truecolor.go new file mode 100644 index 00000000000..006cd1c9d42 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_st_truecolor.go @@ -0,0 +1,158 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // simpleterm with 256 colors + AddTerminfo(&Terminfo{ + Name: "st-truecolor", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + SetFgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%dm", + SetBgRGB: "\x1b[48;2;%p1%d;%p2%d;%p3%dm", + SetFgBgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%d;48;2;%p4%d;%p5%d;%p6%dm", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "177", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: 
"\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + 
KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_sun.go b/vendor/github.com/gdamore/tcell/terminfo/term_sun.go new file mode 100644 index 00000000000..5858b4c2d3e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_sun.go @@ -0,0 +1,44 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // Sun Microsystems Inc. workstation console + AddTerminfo(&Terminfo{ + Name: "sun", + Aliases: []string{"sun1", "sun2"}, + Columns: 80, + Lines: 34, + Bell: "\a", + Clear: "\f", + AttrOff: "\x1b[m", + Reverse: "\x1b[7m", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[247z", + KeyDelete: "\u007f", + KeyBackspace: "\b", + KeyHome: "\x1b[214z", + KeyEnd: "\x1b[220z", + KeyPgUp: "\x1b[216z", + KeyPgDn: "\x1b[222z", + KeyF1: "\x1b[224z", + KeyF2: "\x1b[225z", + KeyF3: "\x1b[226z", + KeyF4: "\x1b[227z", + KeyF5: "\x1b[228z", + KeyF6: "\x1b[229z", + KeyF7: "\x1b[230z", + KeyF8: "\x1b[231z", + KeyF9: "\x1b[232z", + KeyF10: "\x1b[233z", + KeyF11: "\x1b[234z", + KeyF12: "\x1b[235z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_sun_color.go b/vendor/github.com/gdamore/tcell/terminfo/term_sun_color.go new file mode 100644 index 00000000000..e4ebe96b30f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_sun_color.go @@ -0,0 +1,48 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // Sun Microsystems Workstation console with color support (IA systems) + AddTerminfo(&Terminfo{ + Name: "sun-color", + Columns: 80, + Lines: 34, + Colors: 8, + Bell: "\a", + Clear: "\f", + AttrOff: "\x1b[m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[247z", + KeyDelete: "\u007f", + KeyBackspace: "\b", + KeyHome: "\x1b[214z", + KeyEnd: "\x1b[220z", + KeyPgUp: "\x1b[216z", + KeyPgDn: "\x1b[222z", + KeyF1: "\x1b[224z", + KeyF2: "\x1b[225z", + KeyF3: "\x1b[226z", + KeyF4: "\x1b[227z", + KeyF5: "\x1b[228z", + KeyF6: "\x1b[229z", + KeyF7: "\x1b[230z", + KeyF8: "\x1b[231z", + KeyF9: "\x1b[232z", + KeyF10: "\x1b[233z", + KeyF11: "\x1b[234z", + KeyF12: "\x1b[235z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_termite.go b/vendor/github.com/gdamore/tcell/terminfo/term_termite.go new file mode 100644 index 00000000000..8e7f683c784 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_termite.go @@ -0,0 +1,152 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // VTE-based terminal + AddTerminfo(&Terminfo{ + Name: "xterm-termite", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\xff", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + 
KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git 
a/vendor/github.com/gdamore/tcell/terminfo/term_tvi910.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi910.go new file mode 100644 index 00000000000..03b4bf98acf --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi910.go @@ -0,0 +1,36 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // televideo model 910 + AddTerminfo(&Terminfo{ + Name: "tvi910", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1a", + AttrOff: "\x1bG0", + Underline: "\x1bG8", + Reverse: "\x1bG4", + PadChar: "\x00", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_tvi912.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi912.go new file mode 100644 index 00000000000..a6eeb8f71b9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi912.go @@ -0,0 +1,35 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // old televideo 912/914/920 + AddTerminfo(&Terminfo{ + Name: "tvi912", + Aliases: []string{"tvi914", "tvi920"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1a", + Underline: "\x1bl", + PadChar: "\x00", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_tvi921.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi921.go new file mode 100644 index 00000000000..10860cfaf42 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi921.go @@ -0,0 +1,31 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // televideo model 921 with sysline same as page & real vi function + AddTerminfo(&Terminfo{ + Name: "tvi921", + Columns: 80, + Lines: 24, + Clear: "\x1a", + ShowCursor: "\x1b.3", + AttrOff: "\x1bG0", + Underline: "\x1bG8", + Reverse: "\x1bG4", + PadChar: "\x00", + EnterAcs: "\x1b$", + ExitAcs: "\x1b%%", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c$<3/>", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\x16", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyClear: "\x1a", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_tvi925.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi925.go new file mode 100644 index 00000000000..d678ac03d27 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi925.go @@ -0,0 +1,40 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // televideo 925 + AddTerminfo(&Terminfo{ + Name: "tvi925", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1a", + ShowCursor: "\x1b.4", + AttrOff: "\x1bG0", + Underline: "\x1bG8", + Reverse: "\x1bG4", + PadChar: "\x00", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\x16", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyClear: "\x1a", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_tvi950.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi950.go new file mode 100644 index 00000000000..49d9e4ba8d4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi950.go @@ -0,0 +1,43 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // televideo 950 + AddTerminfo(&Terminfo{ + Name: "tvi950", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b*", + AttrOff: "\x1bG0", + Underline: "\x1bG8", + Reverse: "\x1bG4", + PadChar: "\x00", + AltChars: "b\tc\fd\re\ni\v", + EnterAcs: "\x15", + ExitAcs: "\x18", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\x16", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyClear: "\x1b*", + KeyBacktab: "\x1bI", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_tvi970.go b/vendor/github.com/gdamore/tcell/terminfo/term_tvi970.go new file mode 100644 index 00000000000..ef9166eecc1 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_tvi970.go @@ -0,0 +1,37 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // televideo 970 + AddTerminfo(&Terminfo{ + Name: "tvi970", + Columns: 80, + Lines: 24, + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?20l\x1b[?7h\x1b[1Q", + AttrOff: "\x1b[m", + Underline: "\x1b[4m", + PadChar: "\x00", + EnterAcs: "\x1b(B", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%df", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyF1: "\x1b?a", + KeyF2: "\x1b?b", + KeyF3: "\x1b?c", + KeyF4: "\x1b?d", + KeyF5: "\x1b?e", + KeyF6: "\x1b?f", + KeyF7: "\x1b?g", + KeyF8: "\x1b?h", + KeyF9: "\x1b?i", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt100.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt100.go new file mode 100644 index 00000000000..8293cdaa2e4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt100.go @@ -0,0 +1,45 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // dec vt100 (w/advanced video) + AddTerminfo(&Terminfo{ + Name: "vt100", + Aliases: []string{"vt100-am"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<50>", + AttrOff: "\x1b[m\x0f$<2>", + Underline: "\x1b[4m$<2>", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<5>", + CursorBack1: "\b", + CursorUp1: "\x1b[A$<2>", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1bOt", + KeyF6: "\x1bOu", + KeyF7: "\x1bOv", + KeyF8: "\x1bOl", + KeyF9: "\x1bOw", + KeyF10: "\x1bOx", + }) +} diff --git 
a/vendor/github.com/gdamore/tcell/terminfo/term_vt102.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt102.go new file mode 100644 index 00000000000..414d36b3fc9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt102.go @@ -0,0 +1,44 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // dec vt102 + AddTerminfo(&Terminfo{ + Name: "vt102", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<50>", + AttrOff: "\x1b[m\x0f$<2>", + Underline: "\x1b[4m$<2>", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<5>", + CursorBack1: "\b", + CursorUp1: "\x1b[A$<2>", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1bOt", + KeyF6: "\x1bOu", + KeyF7: "\x1bOv", + KeyF8: "\x1bOl", + KeyF9: "\x1bOw", + KeyF10: "\x1bOx", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt220.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt220.go new file mode 100644 index 00000000000..5d0d7489019 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt220.go @@ -0,0 +1,55 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // dec vt220 + AddTerminfo(&Terminfo{ + Name: "vt220", + Aliases: []string{"vt200"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0$<2>", + ExitAcs: "\x1b(B$<4>", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyHelp: "\x1b[28~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt320.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt320.go new file mode 100644 index 00000000000..7832e082429 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt320.go @@ -0,0 +1,60 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // dec vt320 7 bit terminal + AddTerminfo(&Terminfo{ + Name: "vt320", + Aliases: []string{"vt300"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt400.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt400.go new file mode 100644 index 00000000000..2a7cadadad4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt400.go @@ -0,0 +1,43 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // dec vt400 24x80 column autowrap + AddTerminfo(&Terminfo{ + Name: "vt400", + Aliases: []string{"vt400-24", "dec-vt400"}, + Columns: 80, + Lines: 24, + Clear: "\x1b[H\x1b[J$<10/>", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt420.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt420.go new file mode 100644 index 00000000000..b8a4a1c202d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt420.go @@ -0,0 +1,50 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // DEC VT420 + AddTerminfo(&Terminfo{ + Name: "vt420", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[2J$<50>", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B$<2>", + Underline: "\x1b[4m", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0$<2>", + ExitAcs: "\x1b(B$<4>", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<10>", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[17~", + KeyF6: "\x1b[18~", + KeyF7: "\x1b[19~", + KeyF8: "\x1b[20~", + KeyF9: "\x1b[21~", + KeyF10: "\x1b[29~", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_vt52.go b/vendor/github.com/gdamore/tcell/terminfo/term_vt52.go new file mode 100644 index 00000000000..2bf190b7398 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_vt52.go @@ -0,0 +1,26 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // dec vt52 + AddTerminfo(&Terminfo{ + Name: "vt52", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1bH\x1bJ", + PadChar: "\x00", + AltChars: "+h.k0affggolpnqprrss", + EnterAcs: "\x1bF", + ExitAcs: "\x1bG", + SetCursor: "\x1bY%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\x1bD", + CursorUp1: "\x1bA", + KeyUp: "\x1bA", + KeyDown: "\x1bB", + KeyRight: "\x1bC", + KeyLeft: "\x1bD", + KeyBackspace: "\b", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_wy50.go b/vendor/github.com/gdamore/tcell/terminfo/term_wy50.go new file mode 100644 index 00000000000..022dda9d054 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_wy50.go @@ -0,0 +1,56 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // Wyse 50 + AddTerminfo(&Terminfo{ + Name: "wy50", + Aliases: []string{"wyse50"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b+$<20>", + ShowCursor: "\x1b`1", + HideCursor: "\x1b`0", + AttrOff: "\x1b(\x1bH\x03", + Dim: "\x1b`7\x1b)", + Reverse: "\x1b`6\x1b)", + PadChar: "\x00", + AltChars: "0wa_h[jukslrmqnxqzttuyv]wpxv", + EnterAcs: "\x1bH\x02", + ExitAcs: "\x1bH\x03", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyPgUp: "\x1bJ", + KeyPgDn: "\x1bK", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyF10: "\x01I\r", + KeyF11: "\x01J\r", + KeyF12: "\x01K\r", + KeyF13: "\x01L\r", + KeyF14: "\x01M\r", + KeyF15: "\x01N\r", + KeyF16: "\x01O\r", + KeyPrint: "\x1bP", + KeyBacktab: "\x1bI", + KeyShfHome: "\x1b{", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_wy60.go b/vendor/github.com/gdamore/tcell/terminfo/term_wy60.go new file 
mode 100644 index 00000000000..a737bb85745 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_wy60.go @@ -0,0 +1,60 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // Wyse 60 + AddTerminfo(&Terminfo{ + Name: "wy60", + Aliases: []string{"wyse60"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b+$<100>", + EnterCA: "\x1bw0", + ExitCA: "\x1bw1", + ShowCursor: "\x1b`1", + HideCursor: "\x1b`0", + AttrOff: "\x1b(\x1bH\x03\x1bG0\x1bcD", + Underline: "\x1bG8", + Dim: "\x1bGp", + Blink: "\x1bG2", + Reverse: "\x1bG4", + PadChar: "\x00", + AltChars: "+/,.0[a2fxgqh1ihjYk?lZm@nEqDtCu4vAwBx3yszr{c~~", + EnterAcs: "\x1bcE", + ExitAcs: "\x1bcD", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyPgUp: "\x1bJ", + KeyPgDn: "\x1bK", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyF10: "\x01I\r", + KeyF11: "\x01J\r", + KeyF12: "\x01K\r", + KeyF13: "\x01L\r", + KeyF14: "\x01M\r", + KeyF15: "\x01N\r", + KeyF16: "\x01O\r", + KeyPrint: "\x1bP", + KeyBacktab: "\x1bI", + KeyShfHome: "\x1b{", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_wy99_ansi.go b/vendor/github.com/gdamore/tcell/terminfo/term_wy99_ansi.go new file mode 100644 index 00000000000..6bf0d69c0ce --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_wy99_ansi.go @@ -0,0 +1,58 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // Wyse WY-99GT in ansi mode (int'l PC keyboard) + AddTerminfo(&Terminfo{ + Name: "wy99-ansi", + Columns: 80, + Lines: 25, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<200>", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f\x1b[\"q", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h", + ExitKeypad: "\x1b[?1l", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooqqssttuuvvwwxx{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b$<1>", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[M", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF17: "\x1b[K", + KeyF18: "\x1b[31~", + KeyF19: "\x1b[32~", + KeyF20: "\x1b[33~", + KeyF21: "\x1b[34~", + KeyF22: "\x1b[35~", + KeyF23: "\x1b[1~", + KeyF24: "\x1b[2~", + KeyBacktab: "\x1b[z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_wy99a_ansi.go b/vendor/github.com/gdamore/tcell/terminfo/term_wy99a_ansi.go new file mode 100644 index 00000000000..1d7f6f21976 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_wy99a_ansi.go @@ -0,0 +1,58 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // Wyse WY-99GT in ansi mode (US PC keyboard) + AddTerminfo(&Terminfo{ + Name: "wy99a-ansi", + Columns: 80, + Lines: 25, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<200>", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f\x1b[\"q", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h", + ExitKeypad: "\x1b[?1l", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooqqssttuuvvwwxx{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b$<1>", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[M", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF17: "\x1b[K", + KeyF18: "\x1b[31~", + KeyF19: "\x1b[32~", + KeyF20: "\x1b[33~", + KeyF21: "\x1b[34~", + KeyF22: "\x1b[35~", + KeyF23: "\x1b[1~", + KeyF24: "\x1b[2~", + KeyBacktab: "\x1b[z", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_xfce.go b/vendor/github.com/gdamore/tcell/terminfo/term_xfce.go new file mode 100644 index 00000000000..bb39be953c1 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_xfce.go @@ -0,0 +1,153 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // Xfce Terminal + AddTerminfo(&Terminfo{ + Name: "xfce", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1bO1;2P", + KeyF14: "\x1bO1;2Q", + KeyF15: "\x1bO1;2R", + KeyF16: "\x1bO1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1bO1;5P", + KeyF26: "\x1bO1;5Q", + KeyF27: "\x1bO1;5R", + KeyF28: "\x1bO1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1bO1;6P", + KeyF38: "\x1bO1;6Q", + 
KeyF39: "\x1bO1;6R", + KeyF40: "\x1bO1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1bO1;3P", + KeyF50: "\x1bO1;3Q", + KeyF51: "\x1bO1;3R", + KeyF52: "\x1bO1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1bO1;4P", + KeyF62: "\x1bO1;4Q", + KeyF63: "\x1bO1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_xnuppc.go b/vendor/github.com/gdamore/tcell/terminfo/term_xnuppc.go new file mode 100644 index 00000000000..b1dafd6b91b --- /dev/null +++ 
b/vendor/github.com/gdamore/tcell/terminfo/term_xnuppc.go @@ -0,0 +1,31 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // Darwin PowerPC Console (color) + AddTerminfo(&Terminfo{ + Name: "xnuppc", + Aliases: []string{"darwin"}, + Colors: 8, + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\x1b[D", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\u007f", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_xterm.go b/vendor/github.com/gdamore/tcell/terminfo/term_xterm.go new file mode 100644 index 00000000000..1e4d296ef43 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_xterm.go @@ -0,0 +1,154 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package terminfo + +func init() { + // X11 terminal emulator + AddTerminfo(&Terminfo{ + Name: "xterm", + Aliases: []string{"xterm-debian"}, + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: 
"\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_xterm_256color.go b/vendor/github.com/gdamore/tcell/terminfo/term_xterm_256color.go new file mode 100644 index 
00000000000..f95d21e8150 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_xterm_256color.go @@ -0,0 +1,153 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // xterm with 256 colors + AddTerminfo(&Terminfo{ + Name: "xterm-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + 
KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: 
"\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/term_xterm_truecolor.go b/vendor/github.com/gdamore/tcell/terminfo/term_xterm_truecolor.go new file mode 100644 index 00000000000..d035e44e884 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/term_xterm_truecolor.go @@ -0,0 +1,155 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package terminfo + +func init() { + // xterm with 256 colors + AddTerminfo(&Terminfo{ + Name: "xterm-truecolor", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetFgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%dm", + SetBgRGB: "\x1b[48;2;%p1%d;%p2%d;%p3%dm", + SetFgBgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%d;48;2;%p4%d;%p5%d;%p6%dm", + Mouse: "\x1b[M", + MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + 
KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[1;2P", + KeyF14: "\x1b[1;2Q", + KeyF15: "\x1b[1;2R", + KeyF16: "\x1b[1;2S", + KeyF17: "\x1b[15;2~", + KeyF18: "\x1b[17;2~", + KeyF19: "\x1b[18;2~", + KeyF20: "\x1b[19;2~", + KeyF21: "\x1b[20;2~", + KeyF22: "\x1b[21;2~", + KeyF23: "\x1b[23;2~", + KeyF24: "\x1b[24;2~", + KeyF25: "\x1b[1;5P", + KeyF26: "\x1b[1;5Q", + KeyF27: "\x1b[1;5R", + KeyF28: "\x1b[1;5S", + KeyF29: "\x1b[15;5~", + KeyF30: "\x1b[17;5~", + KeyF31: "\x1b[18;5~", + KeyF32: "\x1b[19;5~", + KeyF33: "\x1b[20;5~", + KeyF34: "\x1b[21;5~", + KeyF35: "\x1b[23;5~", + KeyF36: "\x1b[24;5~", + KeyF37: "\x1b[1;6P", + KeyF38: "\x1b[1;6Q", + KeyF39: "\x1b[1;6R", + KeyF40: "\x1b[1;6S", + KeyF41: "\x1b[15;6~", + KeyF42: "\x1b[17;6~", + KeyF43: "\x1b[18;6~", + KeyF44: "\x1b[19;6~", + KeyF45: "\x1b[20;6~", + KeyF46: "\x1b[21;6~", + KeyF47: "\x1b[23;6~", + KeyF48: "\x1b[24;6~", + KeyF49: "\x1b[1;3P", + KeyF50: "\x1b[1;3Q", + KeyF51: "\x1b[1;3R", + KeyF52: "\x1b[1;3S", + KeyF53: "\x1b[15;3~", + KeyF54: "\x1b[17;3~", + KeyF55: "\x1b[18;3~", + KeyF56: "\x1b[19;3~", + KeyF57: "\x1b[20;3~", + KeyF58: "\x1b[21;3~", + KeyF59: "\x1b[23;3~", + KeyF60: "\x1b[24;3~", + KeyF61: "\x1b[1;4P", + KeyF62: "\x1b[1;4Q", + KeyF63: "\x1b[1;4R", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[1;2D", + KeyShfRight: "\x1b[1;2C", + KeyShfUp: "\x1b[1;2A", + KeyShfDown: "\x1b[1;2B", + KeyCtrlLeft: "\x1b[1;5D", + KeyCtrlRight: "\x1b[1;5C", + KeyCtrlUp: "\x1b[1;5A", + KeyCtrlDown: "\x1b[1;5B", + KeyMetaLeft: "\x1b[1;9D", + KeyMetaRight: "\x1b[1;9C", + KeyMetaUp: "\x1b[1;9A", + KeyMetaDown: "\x1b[1;9B", + KeyAltLeft: "\x1b[1;3D", + KeyAltRight: "\x1b[1;3C", + KeyAltUp: "\x1b[1;3A", + KeyAltDown: "\x1b[1;3B", + KeyAltShfLeft: "\x1b[1;4D", + KeyAltShfRight: "\x1b[1;4C", + 
KeyAltShfUp: "\x1b[1;4A", + KeyAltShfDown: "\x1b[1;4B", + KeyMetaShfLeft: "\x1b[1;10D", + KeyMetaShfRight: "\x1b[1;10C", + KeyMetaShfUp: "\x1b[1;10A", + KeyMetaShfDown: "\x1b[1;10B", + KeyCtrlShfLeft: "\x1b[1;6D", + KeyCtrlShfRight: "\x1b[1;6C", + KeyCtrlShfUp: "\x1b[1;6A", + KeyCtrlShfDown: "\x1b[1;6B", + KeyShfHome: "\x1b[1;2H", + KeyShfEnd: "\x1b[1;2F", + KeyCtrlHome: "\x1b[1;5H", + KeyCtrlEnd: "\x1b[1;5F", + KeyAltHome: "\x1b[1;9H", + KeyAltEnd: "\x1b[1;9F", + KeyCtrlShfHome: "\x1b[1;6H", + KeyCtrlShfEnd: "\x1b[1;6F", + KeyMetaShfHome: "\x1b[1;10H", + KeyMetaShfEnd: "\x1b[1;10F", + KeyAltShfHome: "\x1b[1;4H", + KeyAltShfEnd: "\x1b[1;4F", + }) +} diff --git a/vendor/github.com/gdamore/tcell/terminfo/terminfo.go b/vendor/github.com/gdamore/tcell/terminfo/terminfo.go new file mode 100644 index 00000000000..881b9e01962 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/terminfo/terminfo.go @@ -0,0 +1,874 @@ +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terminfo + +import ( + "bytes" + "compress/gzip" + "crypto/sha1" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" +) + +var ( + // ErrTermNotFound indicates that a suitable terminal entry could + // not be found. 
This can result from either not having TERM set, + // or from the TERM failing to support certain minimal functionality, + // in particular absolute cursor addressability (the cup capability) + // is required. For example, legacy "adm3" lacks this capability, + // whereas the slightly newer "adm3a" supports it. This failure + // occurs most often with "dumb". + ErrTermNotFound = errors.New("terminal entry not found") +) + +// Terminfo represents a terminfo entry. Note that we use friendly names +// in Go, but when we write out JSON, we use the same names as terminfo. +// The name, aliases and smous, rmous fields do not come from terminfo directly. +type Terminfo struct { + Name string `json:"name"` + Aliases []string `json:"aliases,omitempty"` + Columns int `json:"cols,omitempty"` // cols + Lines int `json:"lines,omitempty"` // lines + Colors int `json:"colors,omitempty"` // colors + Bell string `json:"bell,omitempty"` // bell + Clear string `json:"clear,omitempty"` // clear + EnterCA string `json:"smcup,omitempty"` // smcup + ExitCA string `json:"rmcup,omitempty"` // rmcup + ShowCursor string `json:"cnorm,omitempty"` // cnorm + HideCursor string `json:"civis,omitempty"` // civis + AttrOff string `json:"sgr0,omitempty"` // sgr0 + Underline string `json:"smul,omitempty"` // smul + Bold string `json:"bold,omitempty"` // bold + Blink string `json:"blink,omitempty"` // blink + Reverse string `json:"rev,omitempty"` // rev + Dim string `json:"dim,omitempty"` // dim + EnterKeypad string `json:"smkx,omitempty"` // smkx + ExitKeypad string `json:"rmkx,omitempty"` // rmkx + SetFg string `json:"setaf,omitempty"` // setaf + SetBg string `json:"setbg,omitempty"` // setab + SetCursor string `json:"cup,omitempty"` // cup + CursorBack1 string `json:"cub1,omitempty"` // cub1 + CursorUp1 string `json:"cuu1,omitempty"` // cuu1 + PadChar string `json:"pad,omitempty"` // pad + KeyBackspace string `json:"kbs,omitempty"` // kbs + KeyF1 string `json:"kf1,omitempty"` // kf1 + KeyF2 string 
`json:"kf2,omitempty"` // kf2 + KeyF3 string `json:"kf3,omitempty"` // kf3 + KeyF4 string `json:"kf4,omitempty"` // kf4 + KeyF5 string `json:"kf5,omitempty"` // kf5 + KeyF6 string `json:"kf6,omitempty"` // kf6 + KeyF7 string `json:"kf7,omitempty"` // kf7 + KeyF8 string `json:"kf8,omitempty"` // kf8 + KeyF9 string `json:"kf9,omitempty"` // kf9 + KeyF10 string `json:"kf10,omitempty"` // kf10 + KeyF11 string `json:"kf11,omitempty"` // kf11 + KeyF12 string `json:"kf12,omitempty"` // kf12 + KeyF13 string `json:"kf13,omitempty"` // kf13 + KeyF14 string `json:"kf14,omitempty"` // kf14 + KeyF15 string `json:"kf15,omitempty"` // kf15 + KeyF16 string `json:"kf16,omitempty"` // kf16 + KeyF17 string `json:"kf17,omitempty"` // kf17 + KeyF18 string `json:"kf18,omitempty"` // kf18 + KeyF19 string `json:"kf19,omitempty"` // kf19 + KeyF20 string `json:"kf20,omitempty"` // kf20 + KeyF21 string `json:"kf21,omitempty"` // kf21 + KeyF22 string `json:"kf22,omitempty"` // kf22 + KeyF23 string `json:"kf23,omitempty"` // kf23 + KeyF24 string `json:"kf24,omitempty"` // kf24 + KeyF25 string `json:"kf25,omitempty"` // kf25 + KeyF26 string `json:"kf26,omitempty"` // kf26 + KeyF27 string `json:"kf27,omitempty"` // kf27 + KeyF28 string `json:"kf28,omitempty"` // kf28 + KeyF29 string `json:"kf29,omitempty"` // kf29 + KeyF30 string `json:"kf30,omitempty"` // kf30 + KeyF31 string `json:"kf31,omitempty"` // kf31 + KeyF32 string `json:"kf32,omitempty"` // kf32 + KeyF33 string `json:"kf33,omitempty"` // kf33 + KeyF34 string `json:"kf34,omitempty"` // kf34 + KeyF35 string `json:"kf35,omitempty"` // kf35 + KeyF36 string `json:"kf36,omitempty"` // kf36 + KeyF37 string `json:"kf37,omitempty"` // kf37 + KeyF38 string `json:"kf38,omitempty"` // kf38 + KeyF39 string `json:"kf39,omitempty"` // kf39 + KeyF40 string `json:"kf40,omitempty"` // kf40 + KeyF41 string `json:"kf41,omitempty"` // kf41 + KeyF42 string `json:"kf42,omitempty"` // kf42 + KeyF43 string `json:"kf43,omitempty"` // kf43 + KeyF44 string 
`json:"kf44,omitempty"` // kf44 + KeyF45 string `json:"kf45,omitempty"` // kf45 + KeyF46 string `json:"kf46,omitempty"` // kf46 + KeyF47 string `json:"kf47,omitempty"` // kf47 + KeyF48 string `json:"kf48,omitempty"` // kf48 + KeyF49 string `json:"kf49,omitempty"` // kf49 + KeyF50 string `json:"kf50,omitempty"` // kf50 + KeyF51 string `json:"kf51,omitempty"` // kf51 + KeyF52 string `json:"kf52,omitempty"` // kf52 + KeyF53 string `json:"kf53,omitempty"` // kf53 + KeyF54 string `json:"kf54,omitempty"` // kf54 + KeyF55 string `json:"kf55,omitempty"` // kf55 + KeyF56 string `json:"kf56,omitempty"` // kf56 + KeyF57 string `json:"kf57,omitempty"` // kf57 + KeyF58 string `json:"kf58,omitempty"` // kf58 + KeyF59 string `json:"kf59,omitempty"` // kf59 + KeyF60 string `json:"kf60,omitempty"` // kf60 + KeyF61 string `json:"kf61,omitempty"` // kf61 + KeyF62 string `json:"kf62,omitempty"` // kf62 + KeyF63 string `json:"kf63,omitempty"` // kf63 + KeyF64 string `json:"kf64,omitempty"` // kf64 + KeyInsert string `json:"kich,omitempty"` // kich1 + KeyDelete string `json:"kdch,omitempty"` // kdch1 + KeyHome string `json:"khome,omitempty"` // khome + KeyEnd string `json:"kend,omitempty"` // kend + KeyHelp string `json:"khlp,omitempty"` // khlp + KeyPgUp string `json:"kpp,omitempty"` // kpp + KeyPgDn string `json:"knp,omitempty"` // knp + KeyUp string `json:"kcuu1,omitempty"` // kcuu1 + KeyDown string `json:"kcud1,omitempty"` // kcud1 + KeyLeft string `json:"kcub1,omitempty"` // kcub1 + KeyRight string `json:"kcuf1,omitempty"` // kcuf1 + KeyBacktab string `json:"kcbt,omitempty"` // kcbt + KeyExit string `json:"kext,omitempty"` // kext + KeyClear string `json:"kclr,omitempty"` // kclr + KeyPrint string `json:"kprt,omitempty"` // kprt + KeyCancel string `json:"kcan,omitempty"` // kcan + Mouse string `json:"kmous,omitempty"` // kmous + MouseMode string `json:"XM,omitempty"` // XM + AltChars string `json:"acsc,omitempty"` // acsc + EnterAcs string `json:"smacs,omitempty"` // smacs + 
ExitAcs string `json:"rmacs,omitempty"` // rmacs + EnableAcs string `json:"enacs,omitempty"` // enacs + KeyShfRight string `json:"kRIT,omitempty"` // kRIT + KeyShfLeft string `json:"kLFT,omitempty"` // kLFT + KeyShfHome string `json:"kHOM,omitempty"` // kHOM + KeyShfEnd string `json:"kEND,omitempty"` // kEND + + // These are non-standard extensions to terminfo. This includes + // true color support, and some additional keys. Its kind of bizarre + // that shifted variants of left and right exist, but not up and down. + // Terminal support for these are going to vary amongst XTerm + // emulations, so don't depend too much on them in your application. + + SetFgBg string `json:"_setfgbg,omitempty"` // setfgbg + SetFgBgRGB string `json:"_setfgbgrgb,omitempty"` // setfgbgrgb + SetFgRGB string `json:"_setfrgb,omitempty"` // setfrgb + SetBgRGB string `json:"_setbrgb,omitempty"` // setbrgb + KeyShfUp string `json:"_kscu1,omitempty"` // shift-up + KeyShfDown string `json:"_kscud1,omitempty"` // shift-down + KeyCtrlUp string `json:"_kccu1,omitempty"` // ctrl-up + KeyCtrlDown string `json:"_kccud1,omitempty"` // ctrl-left + KeyCtrlRight string `json:"_kccuf1,omitempty"` // ctrl-right + KeyCtrlLeft string `json:"_kccub1,omitempty"` // ctrl-left + KeyMetaUp string `json:"_kmcu1,omitempty"` // meta-up + KeyMetaDown string `json:"_kmcud1,omitempty"` // meta-left + KeyMetaRight string `json:"_kmcuf1,omitempty"` // meta-right + KeyMetaLeft string `json:"_kmcub1,omitempty"` // meta-left + KeyAltUp string `json:"_kacu1,omitempty"` // alt-up + KeyAltDown string `json:"_kacud1,omitempty"` // alt-left + KeyAltRight string `json:"_kacuf1,omitempty"` // alt-right + KeyAltLeft string `json:"_kacub1,omitempty"` // alt-left + KeyCtrlHome string `json:"_kchome,omitempty"` + KeyCtrlEnd string `json:"_kcend,omitempty"` + KeyMetaHome string `json:"_kmhome,omitempty"` + KeyMetaEnd string `json:"_kmend,omitempty"` + KeyAltHome string `json:"_kahome,omitempty"` + KeyAltEnd string 
`json:"_kaend,omitempty"` + KeyAltShfUp string `json:"_kascu1,omitempty"` + KeyAltShfDown string `json:"_kascud1,omitempty"` + KeyAltShfLeft string `json:"_kascub1,omitempty"` + KeyAltShfRight string `json:"_kascuf1,omitempty"` + KeyMetaShfUp string `json:"_kmscu1,omitempty"` + KeyMetaShfDown string `json:"_kmscud1,omitempty"` + KeyMetaShfLeft string `json:"_kmscub1,omitempty"` + KeyMetaShfRight string `json:"_kmscuf1,omitempty"` + KeyCtrlShfUp string `json:"_kcscu1,omitempty"` + KeyCtrlShfDown string `json:"_kcscud1,omitempty"` + KeyCtrlShfLeft string `json:"_kcscub1,omitempty"` + KeyCtrlShfRight string `json:"_kcscuf1,omitempty"` + KeyCtrlShfHome string `json:"_kcHOME,omitempty"` + KeyCtrlShfEnd string `json:"_kcEND,omitempty"` + KeyAltShfHome string `json:"_kaHOME,omitempty"` + KeyAltShfEnd string `json:"_kaEND,omitempty"` + KeyMetaShfHome string `json:"_kmHOME,omitempty"` + KeyMetaShfEnd string `json:"_kmEND,omitempty"` +} + +type stackElem struct { + s string + i int + isStr bool + isInt bool +} + +type stack []stackElem + +func (st stack) Push(v string) stack { + e := stackElem{ + s: v, + isStr: true, + } + return append(st, e) +} + +func (st stack) Pop() (string, stack) { + v := "" + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isStr { + v = e.s + } else { + v = strconv.Itoa(e.i) + } + } + return v, st +} + +func (st stack) PopInt() (int, stack) { + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isInt { + return e.i, st + } else if e.isStr { + i, _ := strconv.Atoi(e.s) + return i, st + } + } + return 0, st +} + +func (st stack) PopBool() (bool, stack) { + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isStr { + if e.s == "1" { + return true, st + } + return false, st + } else if e.i == 1 { + return true, st + } else { + return false, st + } + } + return false, st +} + +func (st stack) PushInt(i int) stack { + e := stackElem{ + i: i, + isInt: true, + } + return append(st, e) +} + +func (st stack) 
PushBool(i bool) stack { + if i { + return st.PushInt(1) + } + return st.PushInt(0) +} + +func nextch(s string, index int) (byte, int) { + if index < len(s) { + return s[index], index + 1 + } + return 0, index +} + +// static vars +var svars [26]string + +// paramsBuffer handles some persistent state for TParam. Technically we +// could probably dispense with this, but caching buffer arrays gives us +// a nice little performance boost. Furthermore, we know that TParam is +// rarely (never?) called re-entrantly, so we can just reuse the same +// buffers, making it thread-safe by stashing a lock. +type paramsBuffer struct { + out bytes.Buffer + buf bytes.Buffer + lk sync.Mutex +} + +// Start initializes the params buffer with the initial string data. +// It also locks the paramsBuffer. The caller must call End() when +// finished. +func (pb *paramsBuffer) Start(s string) { + pb.lk.Lock() + pb.out.Reset() + pb.buf.Reset() + pb.buf.WriteString(s) +} + +// End returns the final output from TParam, but it also releases the lock. +func (pb *paramsBuffer) End() string { + s := pb.out.String() + pb.lk.Unlock() + return s +} + +// NextCh returns the next input character to the expander. +func (pb *paramsBuffer) NextCh() (byte, error) { + return pb.buf.ReadByte() +} + +// PutCh "emits" (rather schedules for output) a single byte character. +func (pb *paramsBuffer) PutCh(ch byte) { + pb.out.WriteByte(ch) +} + +// PutString schedules a string for output. +func (pb *paramsBuffer) PutString(s string) { + pb.out.WriteString(s) +} + +var pb = ¶msBuffer{} + +// TParm takes a terminfo parameterized string, such as setaf or cup, and +// evaluates the string, and returns the result with the parameter +// applied. 
+func (t *Terminfo) TParm(s string, p ...int) string { + var stk stack + var a, b string + var ai, bi int + var ab bool + var dvars [26]string + var params [9]int + + pb.Start(s) + + // make sure we always have 9 parameters -- makes it easier + // later to skip checks + for i := 0; i < len(params) && i < len(p); i++ { + params[i] = p[i] + } + + nest := 0 + + for { + + ch, err := pb.NextCh() + if err != nil { + break + } + + if ch != '%' { + pb.PutCh(ch) + continue + } + + ch, err = pb.NextCh() + if err != nil { + // XXX Error + break + } + + switch ch { + case '%': // quoted % + pb.PutCh(ch) + + case 'i': // increment both parameters (ANSI cup support) + params[0]++ + params[1]++ + + case 'c', 's': + // NB: these, and 'd' below are special cased for + // efficiency. They could be handled by the richer + // format support below, less efficiently. + a, stk = stk.Pop() + pb.PutString(a) + + case 'd': + ai, stk = stk.PopInt() + pb.PutString(strconv.Itoa(ai)) + + case '0', '1', '2', '3', '4', 'x', 'X', 'o', ':': + // This is pretty suboptimal, but this is rarely used. + // None of the mainstream terminals use any of this, + // and it would surprise me if this code is ever + // executed outside of test cases. + f := "%" + if ch == ':' { + ch, _ = pb.NextCh() + } + f += string(ch) + for ch == '+' || ch == '-' || ch == '#' || ch == ' ' { + ch, _ = pb.NextCh() + f += string(ch) + } + for (ch >= '0' && ch <= '9') || ch == '.' 
{ + ch, _ = pb.NextCh() + f += string(ch) + } + switch ch { + case 'd', 'x', 'X', 'o': + ai, stk = stk.PopInt() + pb.PutString(fmt.Sprintf(f, ai)) + case 'c', 's': + a, stk = stk.Pop() + pb.PutString(fmt.Sprintf(f, a)) + } + + case 'p': // push parameter + ch, _ = pb.NextCh() + ai = int(ch - '1') + if ai >= 0 && ai < len(params) { + stk = stk.PushInt(params[ai]) + } else { + stk = stk.PushInt(0) + } + + case 'P': // pop & store variable + ch, _ = pb.NextCh() + if ch >= 'A' && ch <= 'Z' { + svars[int(ch-'A')], stk = stk.Pop() + } else if ch >= 'a' && ch <= 'z' { + dvars[int(ch-'a')], stk = stk.Pop() + } + + case 'g': // recall & push variable + ch, _ = pb.NextCh() + if ch >= 'A' && ch <= 'Z' { + stk = stk.Push(svars[int(ch-'A')]) + } else if ch >= 'a' && ch <= 'z' { + stk = stk.Push(dvars[int(ch-'a')]) + } + + case '\'': // push(char) + ch, _ = pb.NextCh() + pb.NextCh() // must be ' but we don't check + stk = stk.Push(string(ch)) + + case '{': // push(int) + ai = 0 + ch, _ = pb.NextCh() + for ch >= '0' && ch <= '9' { + ai *= 10 + ai += int(ch - '0') + ch, _ = pb.NextCh() + } + // ch must be '}' but no verification + stk = stk.PushInt(ai) + + case 'l': // push(strlen(pop)) + a, stk = stk.Pop() + stk = stk.PushInt(len(a)) + + case '+': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai + bi) + + case '-': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai - bi) + + case '*': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai * bi) + + case '/': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + if bi != 0 { + stk = stk.PushInt(ai / bi) + } else { + stk = stk.PushInt(0) + } + + case 'm': // push(pop mod pop) + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + if bi != 0 { + stk = stk.PushInt(ai % bi) + } else { + stk = stk.PushInt(0) + } + + case '&': // AND + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai & bi) + + case '|': // OR + bi, stk = stk.PopInt() + ai, stk = 
stk.PopInt() + stk = stk.PushInt(ai | bi) + + case '^': // XOR + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai ^ bi) + + case '~': // bit complement + ai, stk = stk.PopInt() + stk = stk.PushInt(ai ^ -1) + + case '!': // logical NOT + ai, stk = stk.PopInt() + stk = stk.PushBool(ai != 0) + + case '=': // numeric compare or string compare + b, stk = stk.Pop() + a, stk = stk.Pop() + stk = stk.PushBool(a == b) + + case '>': // greater than, numeric + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushBool(ai > bi) + + case '<': // less than, numeric + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushBool(ai < bi) + + case '?': // start conditional + + case 't': + ab, stk = stk.PopBool() + if ab { + // just keep going + break + } + nest = 0 + ifloop: + // this loop consumes everything until we hit our else, + // or the end of the conditional + for { + ch, err = pb.NextCh() + if err != nil { + break + } + if ch != '%' { + continue + } + ch, _ = pb.NextCh() + switch ch { + case ';': + if nest == 0 { + break ifloop + } + nest-- + case '?': + nest++ + case 'e': + if nest == 0 { + break ifloop + } + } + } + + case 'e': + // if we got here, it means we didn't use the else + // in the 't' case above, and we should skip until + // the end of the conditional + nest = 0 + elloop: + for { + ch, err = pb.NextCh() + if err != nil { + break + } + if ch != '%' { + continue + } + ch, _ = pb.NextCh() + switch ch { + case ';': + if nest == 0 { + break elloop + } + nest-- + case '?': + nest++ + } + } + + case ';': // endif + + } + } + + return pb.End() +} + +// TPuts emits the string to the writer, but expands inline padding +// indications (of the form $<[delay]> where [delay] is msec) to +// a suitable number of padding characters (usually null bytes) based +// upon the supplied baud. At high baud rates, more padding characters +// will be inserted. All Terminfo based strings should be emitted using +// this function. 
+func (t *Terminfo) TPuts(w io.Writer, s string, baud int) { + for { + beg := strings.Index(s, "$<") + if beg < 0 { + // Most strings don't need padding, which is good news! + io.WriteString(w, s) + return + } + io.WriteString(w, s[:beg]) + s = s[beg+2:] + end := strings.Index(s, ">") + if end < 0 { + // unterminated.. just emit bytes unadulterated + io.WriteString(w, "$<"+s) + return + } + val := s[:end] + s = s[end+1:] + padus := 0 + unit := 1000 + dot := false + loop: + for i := range val { + switch val[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + padus *= 10 + padus += int(val[i] - '0') + if dot { + unit *= 10 + } + case '.': + if !dot { + dot = true + } else { + break loop + } + default: + break loop + } + } + cnt := int(((baud / 8) * padus) / unit) + for cnt > 0 { + io.WriteString(w, t.PadChar) + cnt-- + } + } +} + +// TGoto returns a string suitable for addressing the cursor at the given +// row and column. The origin 0, 0 is in the upper left corner of the screen. +func (t *Terminfo) TGoto(col, row int) string { + return t.TParm(t.SetCursor, row, col) +} + +// TColor returns a string corresponding to the given foreground and background +// colors. Either fg or bg can be set to -1 to elide. +func (t *Terminfo) TColor(fi, bi int) string { + rv := "" + // As a special case, we map bright colors to lower versions if the + // color table only holds 8. For the remaining 240 colors, the user + // is out of luck. Someday we could create a mapping table, but its + // not worth it. + if t.Colors == 8 { + if fi > 7 && fi < 16 { + fi -= 8 + } + if bi > 7 && bi < 16 { + bi -= 8 + } + } + if t.Colors > fi && fi >= 0 { + rv += t.TParm(t.SetFg, fi) + } + if t.Colors > bi && bi >= 0 { + rv += t.TParm(t.SetBg, bi) + } + return rv +} + +var ( + dblock sync.Mutex + terminfos = make(map[string]*Terminfo) + aliases = make(map[string]string) +) + +// AddTerminfo can be called to register a new Terminfo entry. 
+func AddTerminfo(t *Terminfo) { + dblock.Lock() + terminfos[t.Name] = t + for _, x := range t.Aliases { + terminfos[x] = t + } + dblock.Unlock() +} + +func loadFromFile(fname string, term string) (*Terminfo, error) { + var e error + var f io.Reader + if f, e = os.Open(fname); e != nil { + return nil, e + } + if strings.HasSuffix(fname, ".gz") { + if f, e = gzip.NewReader(f); e != nil { + return nil, e + } + } + d := json.NewDecoder(f) + for { + t := &Terminfo{} + if e := d.Decode(t); e != nil { + if e == io.EOF { + return nil, ErrTermNotFound + } + return nil, e + } + if t.SetCursor == "" { + // This must be an alias record, return it. + return t, nil + } + if t.Name == term { + return t, nil + } + for _, a := range t.Aliases { + if a == term { + return t, nil + } + } + } +} + +// LookupTerminfo attempts to find a definition for the named $TERM. +// It first looks in the builtin database, which should cover just about +// everyone. If it can't find one there, then it will attempt to read +// one from the JSON file located in either $TCELLDB, $HOME/.tcelldb, +// or as a database file. +// +// The database files are named by taking terminal name, hashing it through +// sha1, and then a subdirectory of the form database/hash[0:2]/hash[0:8] +// (with an optional .gz extension). +// +// For other local database files, we will look for the database file using +// the terminal name, so database/term[0:2]/term[0:8], again with optional +// .gz extension. 
+func LookupTerminfo(name string) (*Terminfo, error) { + if name == "" { + // else on windows: index out of bounds + // on the name[0] reference below + return nil, ErrTermNotFound + } + + dblock.Lock() + t := terminfos[name] + dblock.Unlock() + + if t == nil { + + var files []string + letter := fmt.Sprintf("%02x", name[0]) + gzfile := path.Join(letter, name+".gz") + jsfile := path.Join(letter, name) + hash := fmt.Sprintf("%x", sha1.Sum([]byte(name))) + gzhfile := path.Join(hash[0:2], hash[0:8]+".gz") + jshfile := path.Join(hash[0:2], hash[0:8]) + + // Build up the search path. Old versions of tcell used a + // single database file, whereas the new ones locate them + // in JSON (optionally compressed) files. + // + // The search path for "xterm" (SHA1 sig e2e28a8e...) looks + // like this: + // + // $TCELLDB/78/xterm.gz + // $TCELLDB/78/xterm + // $TCELLDB + // $HOME/.tcelldb/e2/e2e28a8e.gz + // $HOME/.tcelldb/e2/e2e28a8e + // $HOME/.tcelldb/78/xterm.gz + // $HOME/.tcelldb/78/xterm + // $HOME/.tcelldb + // $GOPATH/terminfo/database/e2/e2e28a8e.gz + // $GOPATH/terminfo/database/e2/e2e28a8e + // $GOPATH/terminfo/database/78/xterm.gz + // $GOPATH/terminfo/database/78/xterm + // + // Note that the legacy name lookups (78/xterm etc.) are + // provided for compatibility. We do not actually deliver + // any files with this style of naming, to avoid collisions + // on case insensitive filesystems. (*cough* mac *cough*). + + // If $GOPATH set, honor it, else assume $HOME/go just like + // modern golang does. 
+ gopath := os.Getenv("GOPATH") + if gopath == "" { + gopath = path.Join(os.Getenv("HOME"), "go") + } + if pth := os.Getenv("TCELLDB"); pth != "" { + files = append(files, + path.Join(pth, gzfile), + path.Join(pth, jsfile), + pth) + } + if pth := os.Getenv("HOME"); pth != "" { + pth = path.Join(pth, ".tcelldb") + files = append(files, + path.Join(pth, gzhfile), + path.Join(pth, jshfile), + path.Join(pth, gzfile), + path.Join(pth, jsfile), + pth) + } + + for _, pth := range filepath.SplitList(gopath) { + pth = path.Join(pth, "src", "github.com", + "gdamore", "tcell", "terminfo", "database") + files = append(files, + path.Join(pth, gzhfile), + path.Join(pth, jshfile), + path.Join(pth, gzfile), + path.Join(pth, jsfile)) + } + + for _, fname := range files { + t, _ = loadFromFile(fname, name) + if t != nil { + break + } + } + if t != nil { + if t.Name != name { + // Check for a database loop (no infinite + // recursion). + dblock.Lock() + if aliases[name] != "" { + dblock.Unlock() + return nil, ErrTermNotFound + } + aliases[name] = t.Name + dblock.Unlock() + return LookupTerminfo(t.Name) + } + dblock.Lock() + terminfos[name] = t + dblock.Unlock() + } + } + if t == nil { + return nil, ErrTermNotFound + } + return t, nil +} diff --git a/vendor/github.com/gdamore/tcell/tscreen.go b/vendor/github.com/gdamore/tcell/tscreen.go new file mode 100644 index 00000000000..4c64e833ea2 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen.go @@ -0,0 +1,1388 @@ +// Copyright 2017 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "bytes" + "io" + "os" + "strconv" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/text/transform" + + "github.com/gdamore/tcell/terminfo" +) + +// NewTerminfoScreen returns a Screen that uses the stock TTY interface +// and POSIX termios, combined with a terminfo description taken from +// the $TERM environment variable. It returns an error if the terminal +// is not supported for any reason. +// +// For terminals that do not support dynamic resize events, the $LINES +// $COLUMNS environment variables can be set to the actual window size, +// otherwise defaults taken from the terminal database are used. +func NewTerminfoScreen() (Screen, error) { + ti, e := terminfo.LookupTerminfo(os.Getenv("TERM")) + if e != nil { + return nil, e + } + t := &tScreen{ti: ti} + + t.keyexist = make(map[Key]bool) + t.keycodes = make(map[string]*tKeyCode) + if len(ti.Mouse) > 0 { + t.mouse = []byte(ti.Mouse) + } + t.prepareKeys() + t.buildAcsMap() + t.sigwinch = make(chan os.Signal, 10) + t.fallback = make(map[rune]string) + for k, v := range RuneFallbacks { + t.fallback[k] = v + } + + return t, nil +} + +// tKeyCode represents a combination of a key code and modifiers. +type tKeyCode struct { + key Key + mod ModMask +} + +// tScreen represents a screen backed by a terminfo implementation. 
+type tScreen struct { + ti *terminfo.Terminfo + h int + w int + fini bool + cells CellBuffer + in *os.File + out *os.File + curstyle Style + style Style + evch chan Event + sigwinch chan os.Signal + quit chan struct{} + indoneq chan struct{} + keyexist map[Key]bool + keycodes map[string]*tKeyCode + keychan chan []byte + keytimer *time.Timer + keyexpire time.Time + cx int + cy int + mouse []byte + clear bool + cursorx int + cursory int + tiosp *termiosPrivate + baud int + wasbtn bool + acs map[rune]string + charset string + encoder transform.Transformer + decoder transform.Transformer + fallback map[rune]string + colors map[Color]Color + palette []Color + truecolor bool + escaped bool + buttondn bool + + sync.Mutex +} + +func (t *tScreen) Init() error { + t.evch = make(chan Event, 10) + t.indoneq = make(chan struct{}) + t.keychan = make(chan []byte, 10) + t.keytimer = time.NewTimer(time.Millisecond * 50) + t.charset = "UTF-8" + + t.charset = getCharset() + if enc := GetEncoding(t.charset); enc != nil { + t.encoder = enc.NewEncoder() + t.decoder = enc.NewDecoder() + } else { + return ErrNoCharset + } + ti := t.ti + + // environment overrides + w := ti.Columns + h := ti.Lines + if i, _ := strconv.Atoi(os.Getenv("LINES")); i != 0 { + h = i + } + if i, _ := strconv.Atoi(os.Getenv("COLUMNS")); i != 0 { + w = i + } + if e := t.termioInit(); e != nil { + return e + } + + if t.ti.SetFgBgRGB != "" || t.ti.SetFgRGB != "" || t.ti.SetBgRGB != "" { + t.truecolor = true + } + // A user who wants to have his themes honored can + // set this environment variable. 
+ if os.Getenv("TCELL_TRUECOLOR") == "disable" { + t.truecolor = false + } + if !t.truecolor { + t.colors = make(map[Color]Color) + t.palette = make([]Color, t.Colors()) + for i := 0; i < t.Colors(); i++ { + t.palette[i] = Color(i) + // identity map for our builtin colors + t.colors[Color(i)] = Color(i) + } + } + + t.TPuts(ti.EnterCA) + t.TPuts(ti.HideCursor) + t.TPuts(ti.EnableAcs) + t.TPuts(ti.Clear) + + t.quit = make(chan struct{}) + + t.Lock() + t.cx = -1 + t.cy = -1 + t.style = StyleDefault + t.cells.Resize(w, h) + t.cursorx = -1 + t.cursory = -1 + t.resize() + t.Unlock() + + go t.mainLoop() + go t.inputLoop() + + return nil +} + +func (t *tScreen) prepareKeyMod(key Key, mod ModMask, val string) { + if val != "" { + // Do not overrride codes that already exist + if _, exist := t.keycodes[val]; !exist { + t.keyexist[key] = true + t.keycodes[val] = &tKeyCode{key: key, mod: mod} + } + } +} + +func (t *tScreen) prepareKey(key Key, val string) { + t.prepareKeyMod(key, ModNone, val) +} + +func (t *tScreen) prepareKeys() { + ti := t.ti + t.prepareKey(KeyBackspace, ti.KeyBackspace) + t.prepareKey(KeyF1, ti.KeyF1) + t.prepareKey(KeyF2, ti.KeyF2) + t.prepareKey(KeyF3, ti.KeyF3) + t.prepareKey(KeyF4, ti.KeyF4) + t.prepareKey(KeyF5, ti.KeyF5) + t.prepareKey(KeyF6, ti.KeyF6) + t.prepareKey(KeyF7, ti.KeyF7) + t.prepareKey(KeyF8, ti.KeyF8) + t.prepareKey(KeyF9, ti.KeyF9) + t.prepareKey(KeyF10, ti.KeyF10) + t.prepareKey(KeyF11, ti.KeyF11) + t.prepareKey(KeyF12, ti.KeyF12) + t.prepareKey(KeyF13, ti.KeyF13) + t.prepareKey(KeyF14, ti.KeyF14) + t.prepareKey(KeyF15, ti.KeyF15) + t.prepareKey(KeyF16, ti.KeyF16) + t.prepareKey(KeyF17, ti.KeyF17) + t.prepareKey(KeyF18, ti.KeyF18) + t.prepareKey(KeyF19, ti.KeyF19) + t.prepareKey(KeyF20, ti.KeyF20) + t.prepareKey(KeyF21, ti.KeyF21) + t.prepareKey(KeyF22, ti.KeyF22) + t.prepareKey(KeyF23, ti.KeyF23) + t.prepareKey(KeyF24, ti.KeyF24) + t.prepareKey(KeyF25, ti.KeyF25) + t.prepareKey(KeyF26, ti.KeyF26) + t.prepareKey(KeyF27, ti.KeyF27) + 
t.prepareKey(KeyF28, ti.KeyF28) + t.prepareKey(KeyF29, ti.KeyF29) + t.prepareKey(KeyF30, ti.KeyF30) + t.prepareKey(KeyF31, ti.KeyF31) + t.prepareKey(KeyF32, ti.KeyF32) + t.prepareKey(KeyF33, ti.KeyF33) + t.prepareKey(KeyF34, ti.KeyF34) + t.prepareKey(KeyF35, ti.KeyF35) + t.prepareKey(KeyF36, ti.KeyF36) + t.prepareKey(KeyF37, ti.KeyF37) + t.prepareKey(KeyF38, ti.KeyF38) + t.prepareKey(KeyF39, ti.KeyF39) + t.prepareKey(KeyF40, ti.KeyF40) + t.prepareKey(KeyF41, ti.KeyF41) + t.prepareKey(KeyF42, ti.KeyF42) + t.prepareKey(KeyF43, ti.KeyF43) + t.prepareKey(KeyF44, ti.KeyF44) + t.prepareKey(KeyF45, ti.KeyF45) + t.prepareKey(KeyF46, ti.KeyF46) + t.prepareKey(KeyF47, ti.KeyF47) + t.prepareKey(KeyF48, ti.KeyF48) + t.prepareKey(KeyF49, ti.KeyF49) + t.prepareKey(KeyF50, ti.KeyF50) + t.prepareKey(KeyF51, ti.KeyF51) + t.prepareKey(KeyF52, ti.KeyF52) + t.prepareKey(KeyF53, ti.KeyF53) + t.prepareKey(KeyF54, ti.KeyF54) + t.prepareKey(KeyF55, ti.KeyF55) + t.prepareKey(KeyF56, ti.KeyF56) + t.prepareKey(KeyF57, ti.KeyF57) + t.prepareKey(KeyF58, ti.KeyF58) + t.prepareKey(KeyF59, ti.KeyF59) + t.prepareKey(KeyF60, ti.KeyF60) + t.prepareKey(KeyF61, ti.KeyF61) + t.prepareKey(KeyF62, ti.KeyF62) + t.prepareKey(KeyF63, ti.KeyF63) + t.prepareKey(KeyF64, ti.KeyF64) + t.prepareKey(KeyInsert, ti.KeyInsert) + t.prepareKey(KeyDelete, ti.KeyDelete) + t.prepareKey(KeyHome, ti.KeyHome) + t.prepareKey(KeyEnd, ti.KeyEnd) + t.prepareKey(KeyUp, ti.KeyUp) + t.prepareKey(KeyDown, ti.KeyDown) + t.prepareKey(KeyLeft, ti.KeyLeft) + t.prepareKey(KeyRight, ti.KeyRight) + t.prepareKey(KeyPgUp, ti.KeyPgUp) + t.prepareKey(KeyPgDn, ti.KeyPgDn) + t.prepareKey(KeyHelp, ti.KeyHelp) + t.prepareKey(KeyPrint, ti.KeyPrint) + t.prepareKey(KeyCancel, ti.KeyCancel) + t.prepareKey(KeyExit, ti.KeyExit) + t.prepareKey(KeyBacktab, ti.KeyBacktab) + + t.prepareKeyMod(KeyRight, ModShift, ti.KeyShfRight) + t.prepareKeyMod(KeyLeft, ModShift, ti.KeyShfLeft) + t.prepareKeyMod(KeyUp, ModShift, ti.KeyShfUp) + t.prepareKeyMod(KeyDown, 
ModShift, ti.KeyShfDown) + t.prepareKeyMod(KeyHome, ModShift, ti.KeyShfHome) + t.prepareKeyMod(KeyEnd, ModShift, ti.KeyShfEnd) + + t.prepareKeyMod(KeyRight, ModCtrl, ti.KeyCtrlRight) + t.prepareKeyMod(KeyLeft, ModCtrl, ti.KeyCtrlLeft) + t.prepareKeyMod(KeyUp, ModCtrl, ti.KeyCtrlUp) + t.prepareKeyMod(KeyDown, ModCtrl, ti.KeyCtrlDown) + t.prepareKeyMod(KeyHome, ModCtrl, ti.KeyCtrlHome) + t.prepareKeyMod(KeyEnd, ModCtrl, ti.KeyCtrlEnd) + + t.prepareKeyMod(KeyRight, ModAlt, ti.KeyAltRight) + t.prepareKeyMod(KeyLeft, ModAlt, ti.KeyAltLeft) + t.prepareKeyMod(KeyUp, ModAlt, ti.KeyAltUp) + t.prepareKeyMod(KeyDown, ModAlt, ti.KeyAltDown) + t.prepareKeyMod(KeyHome, ModAlt, ti.KeyAltHome) + t.prepareKeyMod(KeyEnd, ModAlt, ti.KeyAltEnd) + + t.prepareKeyMod(KeyRight, ModAlt, ti.KeyMetaRight) + t.prepareKeyMod(KeyLeft, ModAlt, ti.KeyMetaLeft) + t.prepareKeyMod(KeyUp, ModAlt, ti.KeyMetaUp) + t.prepareKeyMod(KeyDown, ModAlt, ti.KeyMetaDown) + t.prepareKeyMod(KeyHome, ModAlt, ti.KeyMetaHome) + t.prepareKeyMod(KeyEnd, ModAlt, ti.KeyMetaEnd) + + t.prepareKeyMod(KeyRight, ModAlt|ModShift, ti.KeyAltShfRight) + t.prepareKeyMod(KeyLeft, ModAlt|ModShift, ti.KeyAltShfLeft) + t.prepareKeyMod(KeyUp, ModAlt|ModShift, ti.KeyAltShfUp) + t.prepareKeyMod(KeyDown, ModAlt|ModShift, ti.KeyAltShfDown) + t.prepareKeyMod(KeyHome, ModAlt|ModShift, ti.KeyAltShfHome) + t.prepareKeyMod(KeyEnd, ModAlt|ModShift, ti.KeyAltShfEnd) + + t.prepareKeyMod(KeyRight, ModAlt|ModShift, ti.KeyMetaShfRight) + t.prepareKeyMod(KeyLeft, ModAlt|ModShift, ti.KeyMetaShfLeft) + t.prepareKeyMod(KeyUp, ModAlt|ModShift, ti.KeyMetaShfUp) + t.prepareKeyMod(KeyDown, ModAlt|ModShift, ti.KeyMetaShfDown) + t.prepareKeyMod(KeyHome, ModAlt|ModShift, ti.KeyMetaShfHome) + t.prepareKeyMod(KeyEnd, ModAlt|ModShift, ti.KeyMetaShfEnd) + + t.prepareKeyMod(KeyRight, ModCtrl|ModShift, ti.KeyCtrlShfRight) + t.prepareKeyMod(KeyLeft, ModCtrl|ModShift, ti.KeyCtrlShfLeft) + t.prepareKeyMod(KeyUp, ModCtrl|ModShift, ti.KeyCtrlShfUp) + 
t.prepareKeyMod(KeyDown, ModCtrl|ModShift, ti.KeyCtrlShfDown) + t.prepareKeyMod(KeyHome, ModCtrl|ModShift, ti.KeyCtrlShfHome) + t.prepareKeyMod(KeyEnd, ModCtrl|ModShift, ti.KeyCtrlShfEnd) + + // Sadly, xterm handling of keycodes is somewhat erratic. In + // particular, different codes are sent depending on application + // mode is in use or not, and the entries for many of these are + // simply absent from terminfo on many systems. So we insert + // a number of escape sequences if they are not already used, in + // order to have the widest correct usage. Note that prepareKey + // will not inject codes if the escape sequence is already known. + // We also only do this for terminals that have the application + // mode present. + + // Cursor mode + if ti.EnterKeypad != "" { + t.prepareKey(KeyUp, "\x1b[A") + t.prepareKey(KeyDown, "\x1b[B") + t.prepareKey(KeyRight, "\x1b[C") + t.prepareKey(KeyLeft, "\x1b[D") + t.prepareKey(KeyEnd, "\x1b[F") + t.prepareKey(KeyHome, "\x1b[H") + t.prepareKey(KeyDelete, "\x1b[3~") + t.prepareKey(KeyHome, "\x1b[1~") + t.prepareKey(KeyEnd, "\x1b[4~") + t.prepareKey(KeyPgUp, "\x1b[5~") + t.prepareKey(KeyPgDn, "\x1b[6~") + + // Application mode + t.prepareKey(KeyUp, "\x1bOA") + t.prepareKey(KeyDown, "\x1bOB") + t.prepareKey(KeyRight, "\x1bOC") + t.prepareKey(KeyLeft, "\x1bOD") + t.prepareKey(KeyHome, "\x1bOH") + } + +outer: + // Add key mappings for control keys. + for i := 0; i < ' '; i++ { + // Do not insert direct key codes for ambiguous keys. + // For example, ESC is used for lots of other keys, so + // when parsing this we don't want to fast path handling + // of it, but instead wait a bit before parsing it as in + // isolation. 
+ for esc := range t.keycodes { + if []byte(esc)[0] == byte(i) { + continue outer + } + } + + t.keyexist[Key(i)] = true + + mod := ModCtrl + switch Key(i) { + case KeyBS, KeyTAB, KeyESC, KeyCR: + // directly typeable- no control sequence + mod = ModNone + } + t.keycodes[string(rune(i))] = &tKeyCode{key: Key(i), mod: mod} + } +} + +func (t *tScreen) Fini() { + t.Lock() + defer t.Unlock() + + ti := t.ti + t.cells.Resize(0, 0) + t.TPuts(ti.ShowCursor) + t.TPuts(ti.AttrOff) + t.TPuts(ti.Clear) + t.TPuts(ti.ExitCA) + t.TPuts(ti.ExitKeypad) + t.TPuts(ti.TParm(ti.MouseMode, 0)) + t.curstyle = Style(-1) + t.clear = false + t.fini = true + + select { + case <-t.quit: + // do nothing, already closed + + default: + close(t.quit) + } + + t.termioFini() +} + +func (t *tScreen) SetStyle(style Style) { + t.Lock() + if !t.fini { + t.style = style + } + t.Unlock() +} + +func (t *tScreen) Clear() { + t.Fill(' ', t.style) +} + +func (t *tScreen) Fill(r rune, style Style) { + t.Lock() + if !t.fini { + t.cells.Fill(r, style) + } + t.Unlock() +} + +func (t *tScreen) SetContent(x, y int, mainc rune, combc []rune, style Style) { + t.Lock() + if !t.fini { + t.cells.SetContent(x, y, mainc, combc, style) + } + t.Unlock() +} + +func (t *tScreen) GetContent(x, y int) (rune, []rune, Style, int) { + t.Lock() + mainc, combc, style, width := t.cells.GetContent(x, y) + t.Unlock() + return mainc, combc, style, width +} + +func (t *tScreen) SetCell(x, y int, style Style, ch ...rune) { + if len(ch) > 0 { + t.SetContent(x, y, ch[0], ch[1:], style) + } else { + t.SetContent(x, y, ' ', nil, style) + } +} + +func (t *tScreen) encodeRune(r rune, buf []byte) []byte { + + nb := make([]byte, 6) + ob := make([]byte, 6) + num := utf8.EncodeRune(ob, r) + ob = ob[:num] + dst := 0 + var err error + if enc := t.encoder; enc != nil { + enc.Reset() + dst, _, err = enc.Transform(nb, ob, true) + } + if err != nil || dst == 0 || nb[0] == '\x1a' { + // Combining characters are elided + if len(buf) == 0 { + if acs, ok := 
t.acs[r]; ok { + buf = append(buf, []byte(acs)...) + } else if fb, ok := t.fallback[r]; ok { + buf = append(buf, []byte(fb)...) + } else { + buf = append(buf, '?') + } + } + } else { + buf = append(buf, nb[:dst]...) + } + + return buf +} + +func (t *tScreen) sendFgBg(fg Color, bg Color) { + ti := t.ti + if ti.Colors == 0 { + return + } + if t.truecolor { + if ti.SetFgBgRGB != "" && + fg != ColorDefault && bg != ColorDefault { + r1, g1, b1 := fg.RGB() + r2, g2, b2 := bg.RGB() + t.TPuts(ti.TParm(ti.SetFgBgRGB, + int(r1), int(g1), int(b1), + int(r2), int(g2), int(b2))) + } else { + if fg != ColorDefault && ti.SetFgRGB != "" { + r, g, b := fg.RGB() + t.TPuts(ti.TParm(ti.SetFgRGB, + int(r), int(g), int(b))) + } + if bg != ColorDefault && ti.SetBgRGB != "" { + r, g, b := bg.RGB() + t.TPuts(ti.TParm(ti.SetBgRGB, + int(r), int(g), int(b))) + } + } + return + } + + if fg != ColorDefault { + if v, ok := t.colors[fg]; ok { + fg = v + } else { + v = FindColor(fg, t.palette) + t.colors[fg] = v + fg = v + } + } + + if bg != ColorDefault { + if v, ok := t.colors[bg]; ok { + bg = v + } else { + v = FindColor(bg, t.palette) + t.colors[bg] = v + bg = v + } + } + + if ti.SetFgBg != "" && fg != ColorDefault && bg != ColorDefault { + t.TPuts(ti.TParm(ti.SetFgBg, int(fg), int(bg))) + } else { + if fg != ColorDefault && ti.SetFg != "" { + t.TPuts(ti.TParm(ti.SetFg, int(fg))) + } + if bg != ColorDefault && ti.SetBg != "" { + t.TPuts(ti.TParm(ti.SetBg, int(bg))) + } + } +} + +func (t *tScreen) drawCell(x, y int) int { + + ti := t.ti + + mainc, combc, style, width := t.cells.GetContent(x, y) + if !t.cells.Dirty(x, y) { + return width + } + + if t.cy != y || t.cx != x { + t.TPuts(ti.TGoto(x, y)) + t.cx = x + t.cy = y + } + + if style == StyleDefault { + style = t.style + } + if style != t.curstyle { + fg, bg, attrs := style.Decompose() + + t.TPuts(ti.AttrOff) + + t.sendFgBg(fg, bg) + if attrs&AttrBold != 0 { + t.TPuts(ti.Bold) + } + if attrs&AttrUnderline != 0 { + t.TPuts(ti.Underline) + } + 
if attrs&AttrReverse != 0 { + t.TPuts(ti.Reverse) + } + if attrs&AttrBlink != 0 { + t.TPuts(ti.Blink) + } + if attrs&AttrDim != 0 { + t.TPuts(ti.Dim) + } + t.curstyle = style + } + // now emit runes - taking care to not overrun width with a + // wide character, and to ensure that we emit exactly one regular + // character followed up by any residual combing characters + + if width < 1 { + width = 1 + } + + var str string + + buf := make([]byte, 0, 6) + + buf = t.encodeRune(mainc, buf) + for _, r := range combc { + buf = t.encodeRune(r, buf) + } + + str = string(buf) + if width > 1 && str == "?" { + // No FullWidth character support + str = "? " + t.cx = -1 + } + + // XXX: check for hazeltine not being able to display ~ + + if x > t.w-width { + // too wide to fit; emit a single space instead + width = 1 + str = " " + } + io.WriteString(t.out, str) + t.cx += width + t.cells.SetDirty(x, y, false) + if width > 1 { + t.cx = -1 + } + + return width +} + +func (t *tScreen) ShowCursor(x, y int) { + t.Lock() + t.cursorx = x + t.cursory = y + t.Unlock() +} + +func (t *tScreen) HideCursor() { + t.ShowCursor(-1, -1) +} + +func (t *tScreen) showCursor() { + + x, y := t.cursorx, t.cursory + w, h := t.cells.Size() + if x < 0 || y < 0 || x >= w || y >= h { + t.hideCursor() + return + } + t.TPuts(t.ti.TGoto(x, y)) + t.TPuts(t.ti.ShowCursor) + t.cx = x + t.cy = y +} + +func (t *tScreen) TPuts(s string) { + t.ti.TPuts(t.out, s, t.baud) +} + +func (t *tScreen) Show() { + t.Lock() + if !t.fini { + t.resize() + t.draw() + } + t.Unlock() +} + +func (t *tScreen) clearScreen() { + fg, bg, _ := t.style.Decompose() + t.sendFgBg(fg, bg) + t.TPuts(t.ti.Clear) + t.clear = false +} + +func (t *tScreen) hideCursor() { + // does not update cursor position + if t.ti.HideCursor != "" { + t.TPuts(t.ti.HideCursor) + } else { + // No way to hide cursor, stick it + // at bottom right of screen + t.cx, t.cy = t.cells.Size() + t.TPuts(t.ti.TGoto(t.cx, t.cy)) + } +} + +func (t *tScreen) draw() { + // 
clobber cursor position, because we're gonna change it all + t.cx = -1 + t.cy = -1 + + // hide the cursor while we move stuff around + t.hideCursor() + + if t.clear { + t.clearScreen() + } + + for y := 0; y < t.h; y++ { + for x := 0; x < t.w; x++ { + width := t.drawCell(x, y) + if width > 1 { + if x+1 < t.w { + // this is necessary so that if we ever + // go back to drawing that cell, we + // actually will *draw* it. + t.cells.SetDirty(x+1, y, true) + } + } + x += width - 1 + } + } + + // restore the cursor + t.showCursor() +} + +func (t *tScreen) EnableMouse() { + if len(t.mouse) != 0 { + t.TPuts(t.ti.TParm(t.ti.MouseMode, 1)) + } +} + +func (t *tScreen) DisableMouse() { + if len(t.mouse) != 0 { + t.TPuts(t.ti.TParm(t.ti.MouseMode, 0)) + } +} + +func (t *tScreen) Size() (int, int) { + t.Lock() + w, h := t.w, t.h + t.Unlock() + return w, h +} + +func (t *tScreen) resize() { + if w, h, e := t.getWinSize(); e == nil { + if w != t.w || h != t.h { + t.cx = -1 + t.cy = -1 + + t.cells.Resize(w, h) + t.cells.Invalidate() + t.h = h + t.w = w + ev := NewEventResize(w, h) + t.PostEvent(ev) + } + } +} + +func (t *tScreen) Colors() int { + // this doesn't change, no need for lock + if t.truecolor { + return 1 << 24 + } + return t.ti.Colors +} + +func (t *tScreen) PollEvent() Event { + select { + case <-t.quit: + return nil + case ev := <-t.evch: + return ev + } +} + +// vtACSNames is a map of bytes defined by terminfo that are used in +// the terminals Alternate Character Set to represent other glyphs. +// For example, the upper left corner of the box drawing set can be +// displayed by printing "l" while in the alternate character set. +// Its not quite that simple, since the "l" is the terminfo name, +// and it may be necessary to use a different character based on +// the terminal implementation (or the terminal may lack support for +// this altogether). See buildAcsMap below for detail. 
+var vtACSNames = map[byte]rune{ + '+': RuneRArrow, + ',': RuneLArrow, + '-': RuneUArrow, + '.': RuneDArrow, + '0': RuneBlock, + '`': RuneDiamond, + 'a': RuneCkBoard, + 'b': '␉', // VT100, Not defined by terminfo + 'c': '␌', // VT100, Not defined by terminfo + 'd': '␋', // VT100, Not defined by terminfo + 'e': '␊', // VT100, Not defined by terminfo + 'f': RuneDegree, + 'g': RunePlMinus, + 'h': RuneBoard, + 'i': RuneLantern, + 'j': RuneLRCorner, + 'k': RuneURCorner, + 'l': RuneULCorner, + 'm': RuneLLCorner, + 'n': RunePlus, + 'o': RuneS1, + 'p': RuneS3, + 'q': RuneHLine, + 'r': RuneS7, + 's': RuneS9, + 't': RuneLTee, + 'u': RuneRTee, + 'v': RuneBTee, + 'w': RuneTTee, + 'x': RuneVLine, + 'y': RuneLEqual, + 'z': RuneGEqual, + '{': RunePi, + '|': RuneNEqual, + '}': RuneSterling, + '~': RuneBullet, +} + +// buildAcsMap builds a map of characters that we translate from Unicode to +// alternate character encodings. To do this, we use the standard VT100 ACS +// maps. This is only done if the terminal lacks support for Unicode; we +// always prefer to emit Unicode glyphs when we are able. +func (t *tScreen) buildAcsMap() { + acsstr := t.ti.AltChars + t.acs = make(map[rune]string) + for len(acsstr) > 2 { + srcv := acsstr[0] + dstv := string(acsstr[1]) + if r, ok := vtACSNames[srcv]; ok { + t.acs[r] = t.ti.EnterAcs + dstv + t.ti.ExitAcs + } + acsstr = acsstr[2:] + } +} + +func (t *tScreen) PostEventWait(ev Event) { + t.evch <- ev +} + +func (t *tScreen) PostEvent(ev Event) error { + select { + case t.evch <- ev: + return nil + default: + return ErrEventQFull + } +} + +func (t *tScreen) clip(x, y int) (int, int) { + w, h := t.cells.Size() + if x < 0 { + x = 0 + } + if y < 0 { + y = 0 + } + if x > w-1 { + x = w - 1 + } + if y > h-1 { + y = h - 1 + } + return x, y +} + +func (t *tScreen) postMouseEvent(x, y, btn int) { + + // XTerm mouse events only report at most one button at a time, + // which may include a wheel button. 
Wheel motion events are + // reported as single impulses, while other button events are reported + // as separate press & release events. + + button := ButtonNone + mod := ModNone + + // Mouse wheel has bit 6 set, no release events. It should be noted + // that wheel events are sometimes misdelivered as mouse button events + // during a click-drag, so we debounce these, considering them to be + // button press events unless we see an intervening release event. + switch btn & 0x43 { + case 0: + button = Button1 + t.wasbtn = true + case 1: + button = Button2 + t.wasbtn = true + case 2: + button = Button3 + t.wasbtn = true + case 3: + button = ButtonNone + t.wasbtn = false + case 0x40: + if !t.wasbtn { + button = WheelUp + } else { + button = Button1 + } + case 0x41: + if !t.wasbtn { + button = WheelDown + } else { + button = Button2 + } + } + + if btn&0x4 != 0 { + mod |= ModShift + } + if btn&0x8 != 0 { + mod |= ModAlt + } + if btn&0x10 != 0 { + mod |= ModCtrl + } + + // Some terminals will report mouse coordinates outside the + // screen, especially with click-drag events. Clip the coordinates + // to the screen in that case. + x, y = t.clip(x, y) + + ev := NewEventMouse(x, y, button, mod) + t.PostEvent(ev) +} + +// parseSgrMouse attempts to locate an SGR mouse record at the start of the +// buffer. It returns true, true if it found one, and the associated bytes +// be removed from the buffer. It returns true, false if the buffer might +// contain such an event, but more bytes are necessary (partial match), and +// false, false if the content is definitely *not* an SGR mouse record. 
+func (t *tScreen) parseSgrMouse(buf *bytes.Buffer) (bool, bool) { + + b := buf.Bytes() + + var x, y, btn, state int + dig := false + neg := false + motion := false + i := 0 + val := 0 + + for i = range b { + switch b[i] { + case '\x1b': + if state != 0 { + return false, false + } + state = 1 + + case '\x9b': + if state != 0 { + return false, false + } + state = 2 + + case '[': + if state != 1 { + return false, false + } + state = 2 + + case '<': + if state != 2 { + return false, false + } + val = 0 + dig = false + neg = false + state = 3 + + case '-': + if state != 3 && state != 4 && state != 5 { + return false, false + } + if dig || neg { + return false, false + } + neg = true // stay in state + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if state != 3 && state != 4 && state != 5 { + return false, false + } + val *= 10 + val += int(b[i] - '0') + dig = true // stay in state + + case ';': + if neg { + val = -val + } + switch state { + case 3: + btn, val = val, 0 + neg, dig, state = false, false, 4 + case 4: + x, val = val-1, 0 + neg, dig, state = false, false, 5 + default: + return false, false + } + + case 'm', 'M': + if state != 5 { + return false, false + } + if neg { + val = -val + } + y = val - 1 + + motion = (btn & 32) != 0 + btn &^= 32 + if b[i] == 'm' { + // mouse release, clear all buttons + btn |= 3 + btn &^= 0x40 + t.buttondn = false + } else if motion { + /* + * Some broken terminals appear to send + * mouse button one motion events, instead of + * encoding 35 (no buttons) into these events. + * We resolve these by looking for a non-motion + * event first. + */ + if !t.buttondn { + btn |= 3 + btn &^= 0x40 + } + } else { + t.buttondn = true + } + // consume the event bytes + for i >= 0 { + buf.ReadByte() + i-- + } + t.postMouseEvent(x, y, btn) + return true, true + } + } + + // incomplete & inconclusve at this point + return true, false +} + +// parseXtermMouse is like parseSgrMouse, but it parses a legacy +// X11 mouse record. 
+func (t *tScreen) parseXtermMouse(buf *bytes.Buffer) (bool, bool) { + + b := buf.Bytes() + + state := 0 + btn := 0 + x := 0 + y := 0 + + for i := range b { + switch state { + case 0: + switch b[i] { + case '\x1b': + state = 1 + case '\x9b': + state = 2 + default: + return false, false + } + case 1: + if b[i] != '[' { + return false, false + } + state = 2 + case 2: + if b[i] != 'M' { + return false, false + } + state++ + case 3: + btn = int(b[i]) + state++ + case 4: + x = int(b[i]) - 32 - 1 + state++ + case 5: + y = int(b[i]) - 32 - 1 + for i >= 0 { + buf.ReadByte() + i-- + } + t.postMouseEvent(x, y, btn) + return true, true + } + } + return true, false +} + +func (t *tScreen) parseFunctionKey(buf *bytes.Buffer) (bool, bool) { + b := buf.Bytes() + partial := false + for e, k := range t.keycodes { + esc := []byte(e) + if (len(esc) == 1) && (esc[0] == '\x1b') { + continue + } + if bytes.HasPrefix(b, esc) { + // matched + var r rune + if len(esc) == 1 { + r = rune(b[0]) + } + mod := k.mod + if t.escaped { + mod |= ModAlt + t.escaped = false + } + ev := NewEventKey(k.key, r, mod) + t.PostEvent(ev) + for i := 0; i < len(esc); i++ { + buf.ReadByte() + } + return true, true + } + if bytes.HasPrefix(esc, b) { + partial = true + } + } + return partial, false +} + +func (t *tScreen) parseRune(buf *bytes.Buffer) (bool, bool) { + b := buf.Bytes() + if b[0] >= ' ' && b[0] <= 0x7F { + // printable ASCII easy to deal with -- no encodings + mod := ModNone + if t.escaped { + mod = ModAlt + t.escaped = false + } + ev := NewEventKey(KeyRune, rune(b[0]), mod) + t.PostEvent(ev) + buf.ReadByte() + return true, true + } + + if b[0] < 0x80 { + // Low numbered values are control keys, not runes. 
+ return false, false + } + + utfb := make([]byte, 12) + for l := 1; l <= len(b); l++ { + t.decoder.Reset() + nout, nin, e := t.decoder.Transform(utfb, b[:l], true) + if e == transform.ErrShortSrc { + continue + } + if nout != 0 { + r, _ := utf8.DecodeRune(utfb[:nout]) + if r != utf8.RuneError { + mod := ModNone + if t.escaped { + mod = ModAlt + t.escaped = false + } + ev := NewEventKey(KeyRune, r, mod) + t.PostEvent(ev) + } + for nin > 0 { + buf.ReadByte() + nin-- + } + return true, true + } + } + // Looks like potential escape + return true, false +} + +func (t *tScreen) scanInput(buf *bytes.Buffer, expire bool) { + + t.Lock() + defer t.Unlock() + + for { + b := buf.Bytes() + if len(b) == 0 { + buf.Reset() + return + } + + partials := 0 + + if part, comp := t.parseRune(buf); comp { + continue + } else if part { + partials++ + } + + if part, comp := t.parseFunctionKey(buf); comp { + continue + } else if part { + partials++ + } + + // Only parse mouse records if this term claims to have + // mouse support + + if t.ti.Mouse != "" { + if part, comp := t.parseXtermMouse(buf); comp { + continue + } else if part { + partials++ + } + + if part, comp := t.parseSgrMouse(buf); comp { + continue + } else if part { + partials++ + } + } + + if partials == 0 || expire { + if b[0] == '\x1b' { + if len(b) == 1 { + ev := NewEventKey(KeyEsc, 0, ModNone) + t.PostEvent(ev) + t.escaped = false + } else { + t.escaped = true + } + buf.ReadByte() + continue + } + // Nothing was going to match, or we timed out + // waiting for more data -- just deliver the characters + // to the app & let them sort it out. Possibly we + // should only do this for control characters like ESC. 
+ by, _ := buf.ReadByte() + mod := ModNone + if t.escaped { + t.escaped = false + mod = ModAlt + } + ev := NewEventKey(KeyRune, rune(by), mod) + t.PostEvent(ev) + continue + } + + // well we have some partial data, wait until we get + // some more + break + } +} + +func (t *tScreen) mainLoop() { + buf := &bytes.Buffer{} + for { + select { + case <-t.quit: + close(t.indoneq) + return + case <-t.sigwinch: + t.Lock() + t.cx = -1 + t.cy = -1 + t.resize() + t.cells.Invalidate() + t.draw() + t.Unlock() + continue + case <-t.keytimer.C: + // If the timer fired, and the current time + // is after the expiration of the escape sequence, + // then we assume the escape sequence reached it's + // conclusion, and process the chunk independently. + // This lets us detect conflicts such as a lone ESC. + if buf.Len() > 0 { + if time.Now().After(t.keyexpire) { + t.scanInput(buf, true) + } + } + if buf.Len() > 0 { + if !t.keytimer.Stop() { + select { + case <-t.keytimer.C: + default: + } + } + t.keytimer.Reset(time.Millisecond * 50) + } + case chunk := <-t.keychan: + buf.Write(chunk) + t.keyexpire = time.Now().Add(time.Millisecond * 50) + t.scanInput(buf, false) + if !t.keytimer.Stop() { + select { + case <-t.keytimer.C: + default: + } + } + if buf.Len() > 0 { + t.keytimer.Reset(time.Millisecond * 50) + } + } + } +} + +func (t *tScreen) inputLoop() { + + for { + chunk := make([]byte, 128) + n, e := t.in.Read(chunk) + switch e { + case io.EOF: + case nil: + default: + t.PostEvent(NewEventError(e)) + return + } + t.keychan <- chunk[:n] + } +} + +func (t *tScreen) Sync() { + t.Lock() + t.cx = -1 + t.cy = -1 + if !t.fini { + t.resize() + t.clear = true + t.cells.Invalidate() + t.draw() + } + t.Unlock() +} + +func (t *tScreen) CharacterSet() string { + return t.charset +} + +func (t *tScreen) RegisterRuneFallback(orig rune, fallback string) { + t.Lock() + t.fallback[orig] = fallback + t.Unlock() +} + +func (t *tScreen) UnregisterRuneFallback(orig rune) { + t.Lock() + delete(t.fallback, 
orig) + t.Unlock() +} + +func (t *tScreen) CanDisplay(r rune, checkFallbacks bool) bool { + + if enc := t.encoder; enc != nil { + nb := make([]byte, 6) + ob := make([]byte, 6) + num := utf8.EncodeRune(ob, r) + + enc.Reset() + dst, _, err := enc.Transform(nb, ob[:num], true) + if dst != 0 && err == nil && nb[0] != '\x1A' { + return true + } + } + // Terminal fallbacks always permitted, since we assume they are + // basically nearly perfect renditions. + if _, ok := t.acs[r]; ok { + return true + } + if !checkFallbacks { + return false + } + if _, ok := t.fallback[r]; ok { + return true + } + return false +} + +func (t *tScreen) HasMouse() bool { + return len(t.mouse) != 0 +} + +func (t *tScreen) HasKey(k Key) bool { + if k == KeyRune { + return true + } + return t.keyexist[k] +} + +func (t *tScreen) Resize(int, int, int, int) {} diff --git a/vendor/github.com/gdamore/tcell/tscreen_bsd.go b/vendor/github.com/gdamore/tcell/tscreen_bsd.go new file mode 100644 index 00000000000..86d749b7334 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen_bsd.go @@ -0,0 +1,119 @@ +// +build freebsd netbsd openbsd dragonfly + +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "os" + "os/signal" + "syscall" + "unsafe" +) + +type termiosPrivate syscall.Termios + +func (t *tScreen) termioInit() error { + var e error + var newtios termiosPrivate + var fd uintptr + var tios uintptr + var ioc uintptr + t.tiosp = &termiosPrivate{} + + if t.in, e = os.OpenFile("/dev/tty", os.O_RDONLY, 0); e != nil { + goto failed + } + if t.out, e = os.OpenFile("/dev/tty", os.O_WRONLY, 0); e != nil { + goto failed + } + + tios = uintptr(unsafe.Pointer(t.tiosp)) + ioc = uintptr(syscall.TIOCGETA) + fd = uintptr(t.out.Fd()) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + // On this platform (FreeBSD and family), the baud rate is stored + // directly as an integer in termios.c_ospeed. No bitmasking required. + t.baud = int(t.tiosp.Ospeed) + newtios = *t.tiosp + newtios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | + syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | + syscall.ICRNL | syscall.IXON + newtios.Oflag &^= syscall.OPOST + newtios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | + syscall.ISIG | syscall.IEXTEN + newtios.Cflag &^= syscall.CSIZE | syscall.PARENB + newtios.Cflag |= syscall.CS8 + + tios = uintptr(unsafe.Pointer(&newtios)) + + ioc = uintptr(syscall.TIOCSETA) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + signal.Notify(t.sigwinch, syscall.SIGWINCH) + + if w, h, e := t.getWinSize(); e == nil && w != 0 && h != 0 { + t.cells.Resize(w, h) + } + + return nil + +failed: + if t.in != nil { + t.in.Close() + } + if t.out != nil { + t.out.Close() + } + return e +} + +func (t *tScreen) termioFini() { + + signal.Stop(t.sigwinch) + + <-t.indoneq + + if t.out != nil { + fd := uintptr(t.out.Fd()) + ioc := uintptr(syscall.TIOCSETAF) + tios := uintptr(unsafe.Pointer(t.tiosp)) + syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0) + t.out.Close() + } + if t.in != 
nil { + t.in.Close() + } +} + +func (t *tScreen) getWinSize() (int, int, error) { + + fd := uintptr(t.out.Fd()) + dim := [4]uint16{} + dimp := uintptr(unsafe.Pointer(&dim)) + ioc := uintptr(syscall.TIOCGWINSZ) + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, + fd, ioc, dimp, 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dim[1]), int(dim[0]), nil +} diff --git a/vendor/github.com/gdamore/tcell/tscreen_darwin.go b/vendor/github.com/gdamore/tcell/tscreen_darwin.go new file mode 100644 index 00000000000..df51cb5fd24 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen_darwin.go @@ -0,0 +1,140 @@ +// +build darwin + +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// The Darwin system is *almost* a real BSD system, but it suffers from +// a brain damaged TTY driver. This TTY driver does not actually +// wake up in poll() or similar calls, which means that we cannot reliably +// shut down the terminal without resorting to obscene custom C code +// and a dedicated poller thread. +// +// So instead, we do a best effort, and simply try to do the close in the +// background. Probably this will cause a leak of two goroutines and +// maybe also the file descriptor, meaning that applications on Darwin +// can't reinitialize the screen, but that's probably a very rare behavior, +// and accepting that is the best of some very poor alternative options. 
+// +// Maybe someday Apple will fix there tty driver, but its been broken for +// a long time (probably forever) so holding one's breath is contraindicated. + +import ( + "os" + "os/signal" + "syscall" + "unsafe" +) + +type termiosPrivate syscall.Termios + +func (t *tScreen) termioInit() error { + var e error + var newtios termiosPrivate + var fd uintptr + var tios uintptr + var ioc uintptr + t.tiosp = &termiosPrivate{} + + if t.in, e = os.OpenFile("/dev/tty", os.O_RDONLY, 0); e != nil { + goto failed + } + if t.out, e = os.OpenFile("/dev/tty", os.O_WRONLY, 0); e != nil { + goto failed + } + + tios = uintptr(unsafe.Pointer(t.tiosp)) + ioc = uintptr(syscall.TIOCGETA) + fd = uintptr(t.out.Fd()) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + // On this platform (FreeBSD and family), the baud rate is stored + // directly as an integer in termios.c_ospeed. No bitmasking required. + t.baud = int(t.tiosp.Ospeed) + newtios = *t.tiosp + newtios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | + syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | + syscall.ICRNL | syscall.IXON + newtios.Oflag &^= syscall.OPOST + newtios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | + syscall.ISIG | syscall.IEXTEN + newtios.Cflag &^= syscall.CSIZE | syscall.PARENB + newtios.Cflag |= syscall.CS8 + + tios = uintptr(unsafe.Pointer(&newtios)) + + ioc = uintptr(syscall.TIOCSETA) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + signal.Notify(t.sigwinch, syscall.SIGWINCH) + + if w, h, e := t.getWinSize(); e == nil && w != 0 && h != 0 { + t.cells.Resize(w, h) + } + + return nil + +failed: + if t.in != nil { + t.in.Close() + } + if t.out != nil { + t.out.Close() + } + return e +} + +func (t *tScreen) termioFini() { + + signal.Stop(t.sigwinch) + + <-t.indoneq + + if t.out != nil { + fd := uintptr(t.out.Fd()) + ioc := 
uintptr(syscall.TIOCSETAF) + tios := uintptr(unsafe.Pointer(t.tiosp)) + syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0) + t.out.Close() + } + + // See above -- we background this call which might help, but + // really the tty is probably open. + + go func() { + if t.in != nil { + t.in.Close() + } + }() +} + +func (t *tScreen) getWinSize() (int, int, error) { + + fd := uintptr(t.out.Fd()) + dim := [4]uint16{} + dimp := uintptr(unsafe.Pointer(&dim)) + ioc := uintptr(syscall.TIOCGWINSZ) + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, + fd, ioc, dimp, 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dim[1]), int(dim[0]), nil +} diff --git a/vendor/github.com/gdamore/tcell/tscreen_linux.go b/vendor/github.com/gdamore/tcell/tscreen_linux.go new file mode 100644 index 00000000000..79602f4c036 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen_linux.go @@ -0,0 +1,129 @@ +// +build linux + +// Copyright 2017 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "os" + "os/signal" + "syscall" + "unsafe" +) + +type termiosPrivate syscall.Termios + +func (t *tScreen) termioInit() error { + var e error + var newtios termiosPrivate + var fd uintptr + var tios uintptr + var ioc uintptr + t.tiosp = &termiosPrivate{} + + if t.in, e = os.OpenFile("/dev/tty", os.O_RDONLY, 0); e != nil { + goto failed + } + if t.out, e = os.OpenFile("/dev/tty", os.O_WRONLY, 0); e != nil { + goto failed + } + + tios = uintptr(unsafe.Pointer(t.tiosp)) + ioc = uintptr(syscall.TCGETS) + fd = uintptr(t.out.Fd()) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + // On this platform, the baud rate is stored + // directly as an integer in termios.c_ospeed. + t.baud = int(t.tiosp.Ospeed) + newtios = *t.tiosp + newtios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | + syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | + syscall.ICRNL | syscall.IXON + newtios.Oflag &^= syscall.OPOST + newtios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | + syscall.ISIG | syscall.IEXTEN + newtios.Cflag &^= syscall.CSIZE | syscall.PARENB + newtios.Cflag |= syscall.CS8 + + // This is setup for blocking reads. In the past we attempted to + // use non-blocking reads, but now a separate input loop and timer + // copes with the problems we had on some systems (BSD/Darwin) + // where close hung forever. + newtios.Cc[syscall.VMIN] = 1 + newtios.Cc[syscall.VTIME] = 0 + + tios = uintptr(unsafe.Pointer(&newtios)) + + // Well this kind of sucks, because we don't have TCSETSF, but only + // TCSETS. This can leave some output unflushed. 
+ ioc = uintptr(syscall.TCSETS) + if _, _, e1 := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0); e1 != 0 { + e = e1 + goto failed + } + + signal.Notify(t.sigwinch, syscall.SIGWINCH) + + if w, h, e := t.getWinSize(); e == nil && w != 0 && h != 0 { + t.cells.Resize(w, h) + } + + return nil + +failed: + if t.in != nil { + t.in.Close() + } + if t.out != nil { + t.out.Close() + } + return e +} + +func (t *tScreen) termioFini() { + + signal.Stop(t.sigwinch) + + <-t.indoneq + + if t.out != nil { + fd := uintptr(t.out.Fd()) + // XXX: We'd really rather do TCSETSF here! + ioc := uintptr(syscall.TCSETS) + tios := uintptr(unsafe.Pointer(t.tiosp)) + syscall.Syscall6(syscall.SYS_IOCTL, fd, ioc, tios, 0, 0, 0) + t.out.Close() + } + if t.in != nil { + t.in.Close() + } +} + +func (t *tScreen) getWinSize() (int, int, error) { + + fd := uintptr(t.out.Fd()) + dim := [4]uint16{} + dimp := uintptr(unsafe.Pointer(&dim)) + ioc := uintptr(syscall.TIOCGWINSZ) + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, + fd, ioc, dimp, 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dim[1]), int(dim[0]), nil +} diff --git a/vendor/github.com/gdamore/tcell/tscreen_posix.go b/vendor/github.com/gdamore/tcell/tscreen_posix.go new file mode 100644 index 00000000000..66fbe04b365 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen_posix.go @@ -0,0 +1,206 @@ +// +build solaris + +// Copyright 2017 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "os" + "os/signal" + "syscall" +) + +// #include +// #include +// +// int getwinsize(int fd, int *cols, int *rows) { +// #if defined TIOCGWINSZ +// struct winsize w; +// if (ioctl(fd, TIOCGWINSZ, &w) < 0) { +// return (-1); +// } +// *cols = w.ws_col; +// *rows = w.ws_row; +// return (0); +// #else +// return (-1); +// #endif +// } +// +// int getbaud(struct termios *tios) { +// switch (cfgetospeed(tios)) { +// #ifdef B0 +// case B0: return (0); +// #endif +// #ifdef B50 +// case B50: return (50); +// #endif +// #ifdef B75 +// case B75: return (75); +// #endif +// #ifdef B110 +// case B110: return (110); +// #endif +// #ifdef B134 +// case B134: return (134); +// #endif +// #ifdef B150 +// case B150: return (150); +// #endif +// #ifdef B200 +// case B200: return (200); +// #endif +// #ifdef B300 +// case B300: return (300); +// #endif +// #ifdef B600 +// case B600: return (600); +// #endif +// #ifdef B1200 +// case B1200: return (1200); +// #endif +// #ifdef B1800 +// case B1800: return (1800); +// #endif +// #ifdef B2400 +// case B2400: return (2400); +// #endif +// #ifdef B4800 +// case B4800: return (4800); +// #endif +// #ifdef B9600 +// case B9600: return (9600); +// #endif +// #ifdef B19200 +// case B19200: return (19200); +// #endif +// #ifdef B38400 +// case B38400: return (38400); +// #endif +// #ifdef B57600 +// case B57600: return (57600); +// #endif +// #ifdef B76800 +// case B76800: return (76800); +// #endif +// #ifdef B115200 +// case B115200: return (115200); +// #endif +// #ifdef B153600 +// case B153600: return (153600); +// #endif +// #ifdef B230400 +// case B230400: return (230400); +// #endif +// #ifdef B307200 +// case B307200: return (307200); +// #endif +// #ifdef B460800 +// case B460800: return (460800); +// #endif +// #ifdef B921600 +// case B921600: return (921600); +// #endif +// } +// return (0); +// } +import "C" + +type termiosPrivate struct { + tios C.struct_termios +} + +func (t *tScreen) termioInit() 
error { + var e error + var rv C.int + var newtios C.struct_termios + var fd C.int + + if t.in, e = os.OpenFile("/dev/tty", os.O_RDONLY, 0); e != nil { + goto failed + } + if t.out, e = os.OpenFile("/dev/tty", os.O_WRONLY, 0); e != nil { + goto failed + } + + t.tiosp = &termiosPrivate{} + + fd = C.int(t.out.Fd()) + if rv, e = C.tcgetattr(fd, &t.tiosp.tios); rv != 0 { + goto failed + } + t.baud = int(C.getbaud(&t.tiosp.tios)) + newtios = t.tiosp.tios + newtios.c_iflag &^= C.IGNBRK | C.BRKINT | C.PARMRK | + C.ISTRIP | C.INLCR | C.IGNCR | + C.ICRNL | C.IXON + newtios.c_oflag &^= C.OPOST + newtios.c_lflag &^= C.ECHO | C.ECHONL | C.ICANON | + C.ISIG | C.IEXTEN + newtios.c_cflag &^= C.CSIZE | C.PARENB + newtios.c_cflag |= C.CS8 + + // This is setup for blocking reads. In the past we attempted to + // use non-blocking reads, but now a separate input loop and timer + // copes with the problems we had on some systems (BSD/Darwin) + // where close hung forever. + newtios.Cc[syscall.VMIN] = 1 + newtios.Cc[syscall.VTIME] = 0 + + if rv, e = C.tcsetattr(fd, C.TCSANOW|C.TCSAFLUSH, &newtios); rv != 0 { + goto failed + } + + signal.Notify(t.sigwinch, syscall.SIGWINCH) + + if w, h, e := t.getWinSize(); e == nil && w != 0 && h != 0 { + t.cells.Resize(w, h) + } + + return nil + +failed: + if t.in != nil { + t.in.Close() + } + if t.out != nil { + t.out.Close() + } + return e +} + +func (t *tScreen) termioFini() { + + signal.Stop(t.sigwinch) + + <-t.indoneq + + if t.out != nil { + fd := C.int(t.out.Fd()) + C.tcsetattr(fd, C.TCSANOW|C.TCSAFLUSH, &t.tiosp.tios) + t.out.Close() + } + if t.in != nil { + t.in.Close() + } +} + +func (t *tScreen) getWinSize() (int, int, error) { + var cx, cy C.int + if r, e := C.getwinsize(C.int(t.out.Fd()), &cx, &cy); r != 0 { + return 0, 0, e + } + return int(cx), int(cy), nil +} diff --git a/vendor/github.com/gdamore/tcell/tscreen_stub.go b/vendor/github.com/gdamore/tcell/tscreen_stub.go new file mode 100644 index 00000000000..91de26e17b0 --- /dev/null +++ 
b/vendor/github.com/gdamore/tcell/tscreen_stub.go @@ -0,0 +1,32 @@ +// +build nacl plan9 + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// This stub file is for systems that have no termios. + +type termiosPrivate struct{} + +func (t *tScreen) termioInit() error { + return ErrNoScreen +} + +func (t *tScreen) termioFini() { +} + +func (t *tScreen) getWinSize() (int, int, error) { + return 0, 0, ErrNoScreen +} diff --git a/vendor/github.com/gdamore/tcell/tscreen_windows.go b/vendor/github.com/gdamore/tcell/tscreen_windows.go new file mode 100644 index 00000000000..daac09764c0 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/tscreen_windows.go @@ -0,0 +1,40 @@ +// +build windows + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// On Windows we don't have support for termios. We probably could, and +// may should, in a cygwin type environment. 
Its not clear how to make +// this all work nicely with both cygwin and Windows console, so we +// decline to do so here. + +func (t *tScreen) termioInit() error { + return ErrNoScreen +} + +func (t *tScreen) termioFini() { + return +} + +func (t *tScreen) getWinSize() (int, int, error) { + return 0, 0, ErrNoScreen +} + +func (t *tScreen) getCharset() string { + return "UTF-16LE" +} + +type termiosPrivate struct{} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go new file mode 100644 index 00000000000..4eb79bb4e5a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for reading/writing v1.Images from/to +// a tarball on-disk. +package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go new file mode 100644 index 00000000000..2a62327ce6c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -0,0 +1,338 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sync" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" + "github.com/google/go-containerregistry/pkg/v1/v1util" +) + +type image struct { + opener Opener + td *tarDescriptor + config []byte + imgDescriptor *singleImageTarDescriptor + + tag *name.Tag +} + +type uncompressedImage struct { + *image +} + +type compressedImage struct { + *image + manifestLock sync.Mutex // Protects manifest + manifest *v1.Manifest +} + +var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) +var _ partial.CompressedImageCore = (*compressedImage)(nil) + +type Opener func() (io.ReadCloser, error) + +func pathOpener(path string) Opener { + return func() (io.ReadCloser, error) { + return os.Open(path) + } +} + +func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { + return Image(pathOpener(path), tag) +} + +// Image exposes an image from the tarball at the provided path. +func Image(opener Opener, tag *name.Tag) (v1.Image, error) { + img := &image{ + opener: opener, + tag: tag, + } + if err := img.loadTarDescriptorAndConfig(); err != nil { + return nil, err + } + + // Peek at the first layer and see if it's compressed. 
+ compressed, err := img.areLayersCompressed() + if err != nil { + return nil, err + } + if compressed { + c := compressedImage{ + image: img, + } + return partial.CompressedToImage(&c) + } + + uc := uncompressedImage{ + image: img, + } + return partial.UncompressedToImage(&uc) +} + +func (i *image) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// singleImageTarDescriptor is the struct used to represent a single image inside a `docker save` tarball. +type singleImageTarDescriptor struct { + Config string + RepoTags []string + Layers []string +} + +// tarDescriptor is the struct used inside the `manifest.json` file of a `docker save` tarball. +type tarDescriptor []singleImageTarDescriptor + +func (td tarDescriptor) findSpecifiedImageDescriptor(tag *name.Tag) (*singleImageTarDescriptor, error) { + if tag == nil { + if len(td) != 1 { + return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") + } + return &(td)[0], nil + } + for _, img := range td { + for _, tagStr := range img.RepoTags { + repoTag, err := name.NewTag(tagStr, name.WeakValidation) + if err != nil { + return nil, err + } + + // Compare the resolved names, since there are several ways to specify the same tag. 
+ if repoTag.Name() == tag.Name() { + return &img, nil + } + } + } + return nil, fmt.Errorf("tag %s not found in tarball", tag) +} + +func (i *image) areLayersCompressed() (bool, error) { + if len(i.imgDescriptor.Layers) == 0 { + return false, errors.New("0 layers found in image") + } + layer := i.imgDescriptor.Layers[0] + blob, err := extractFileFromTar(i.opener, layer) + if err != nil { + return false, err + } + defer blob.Close() + return v1util.IsGzipped(blob) +} + +func (i *image) loadTarDescriptorAndConfig() error { + td, err := extractFileFromTar(i.opener, "manifest.json") + if err != nil { + return err + } + defer td.Close() + + if err := json.NewDecoder(td).Decode(&i.td); err != nil { + return err + } + + i.imgDescriptor, err = i.td.findSpecifiedImageDescriptor(i.tag) + if err != nil { + return err + } + + cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) + if err != nil { + return err + } + defer cfg.Close() + + i.config, err = ioutil.ReadAll(cfg) + if err != nil { + return err + } + return nil +} + +func (i *image) RawConfigFile() ([]byte, error) { + return i.config, nil +} + +// tarFile represents a single file inside a tar. Closing it closes the tar itself. 
+type tarFile struct { + io.Reader + io.Closer +} + +func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + f, err := opener() + if err != nil { + return nil, err + } + tf := tar.NewReader(f) + for { + hdr, err := tf.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if hdr.Name == filePath { + return tarFile{ + Reader: tf, + Closer: f, + }, nil + } + } + return nil, fmt.Errorf("file %s not found in tar", filePath) +} + +// uncompressedLayerFromTarball implements partial.UncompressedLayer +type uncompressedLayerFromTarball struct { + diffID v1.Hash + opener Opener + filePath string +} + +// DiffID implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { + return ulft.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { + return extractFileFromTar(ulft.opener, ulft.filePath) +} + +func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + cfg, err := partial.ConfigFile(i) + if err != nil { + return nil, err + } + for idx, diffID := range cfg.RootFS.DiffIDs { + if diffID == h { + return &uncompressedLayerFromTarball{ + diffID: diffID, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, nil + } + } + return nil, fmt.Errorf("diff id %q not found", h) +} + +func (c *compressedImage) Manifest() (*v1.Manifest, error) { + c.manifestLock.Lock() + defer c.manifestLock.Unlock() + if c.manifest != nil { + return c.manifest, nil + } + + b, err := c.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + c.manifest = &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + for _, p := range 
c.imgDescriptor.Layers { + l, err := extractFileFromTar(c.opener, p) + if err != nil { + return nil, err + } + defer l.Close() + sha, size, err := v1.SHA256(l) + if err != nil { + return nil, err + } + c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ + MediaType: types.DockerLayer, + Size: size, + Digest: sha, + }) + } + return c.manifest, nil +} + +func (c *compressedImage) RawManifest() ([]byte, error) { + return partial.RawManifest(c) +} + +// compressedLayerFromTarball implements partial.CompressedLayer +type compressedLayerFromTarball struct { + digest v1.Hash + opener Opener + filePath string +} + +// Digest implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { + return clft.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { + return extractFileFromTar(clft.opener, clft.filePath) +} + +// Size implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Size() (int64, error) { + r, err := clft.Compressed() + if err != nil { + return -1, err + } + defer r.Close() + _, i, err := v1.SHA256(r) + return i, err +} + +func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + m, err := c.Manifest() + if err != nil { + return nil, err + } + for i, l := range m.Layers { + if l.Digest == h { + fp := c.imgDescriptor.Layers[i] + return &compressedLayerFromTarball{ + digest: h, + opener: c.opener, + filePath: fp, + }, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go new file mode 100644 index 00000000000..6d43ff7d49d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "compress/gzip" + "io" + "io/ioutil" + "os" + + "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/v1util" +) + +type layer struct { + digest v1.Hash + diffID v1.Hash + size int64 + opener Opener + compressed bool +} + +func (l *layer) Digest() (v1.Hash, error) { + return l.digest, nil +} + +func (l *layer) DiffID() (v1.Hash, error) { + return l.diffID, nil +} + +func (l *layer) Compressed() (io.ReadCloser, error) { + rc, err := l.opener() + if err == nil && !l.compressed { + return v1util.GzipReadCloser(rc) + } + + return rc, err +} + +func (l *layer) Uncompressed() (io.ReadCloser, error) { + rc, err := l.opener() + if err == nil && l.compressed { + return v1util.GunzipReadCloser(rc) + } + + return rc, err +} + +func (l *layer) Size() (int64, error) { + return l.size, nil +} + +// LayerFromFile returns a v1.Layer given a tarball +func LayerFromFile(path string) (v1.Layer, error) { + opener := func() (io.ReadCloser, error) { + return os.Open(path) + } + return LayerFromOpener(opener) +} + +// LayerFromOpener returns a v1.Layer given an Opener function +func LayerFromOpener(opener Opener) (v1.Layer, error) { + rc, err := opener() + if err != nil { + return nil, err + } + defer rc.Close() + + compressed, err := v1util.IsGzipped(rc) + if err != nil { + return nil, err + } + + var digest v1.Hash + var size int64 + if digest, size, err = 
computeDigest(opener, compressed); err != nil { + return nil, err + } + + diffID, err := computeDiffID(opener, compressed) + if err != nil { + return nil, err + } + + return &layer{ + digest: digest, + diffID: diffID, + size: size, + compressed: compressed, + opener: opener, + }, nil +} + +func computeDigest(opener Opener, compressed bool) (v1.Hash, int64, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, 0, err + } + defer rc.Close() + + if compressed { + return v1.SHA256(rc) + } + + reader, err := v1util.GzipReadCloser(ioutil.NopCloser(rc)) + if err != nil { + return v1.Hash{}, 0, err + } + + return v1.SHA256(reader) +} + +func computeDiffID(opener Opener, compressed bool) (v1.Hash, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, err + } + defer rc.Close() + + if !compressed { + digest, _, err := v1.SHA256(rc) + return digest, err + } + + reader, err := gzip.NewReader(rc) + if err != nil { + return v1.Hash{}, err + } + + diffID, _, err := v1.SHA256(reader) + return diffID, err +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go new file mode 100644 index 00000000000..b0d45061ebc --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -0,0 +1,165 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1" +) + +// WriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.Write with a new file. +func WriteToFile(p string, tag name.Tag, img v1.Image) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return Write(tag, img, w) +} + +// MultiWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. +func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return MultiWrite(tagToImage, w) +} + +// Write is a wrapper to write a single image and tag to a tarball. +func Write(tag name.Tag, img v1.Image, w io.Writer) error { + return MultiWrite(map[name.Tag]v1.Image{tag: img}, w) +} + +// MultiWrite writes the contents of each image to the provided reader, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. +func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error { + tf := tar.NewWriter(w) + defer tf.Close() + + imageToTags := dedupTagToImage(tagToImage) + var td tarDescriptor + + for img, tags := range imageToTags { + // Write the config. + cfgName, err := img.ConfigName() + if err != nil { + return err + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return err + } + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return err + } + + // Write the layers. 
+ layers, err := img.Layers() + if err != nil { + return err + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return err + } + + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + r, err := l.Compressed() + if err != nil { + return err + } + blobSize, err := l.Size() + if err != nil { + return err + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return err + } + } + + // Generate the tar descriptor and write it. + sitd := singleImageTarDescriptor{ + Config: cfgName.String(), + RepoTags: tags, + Layers: layerFiles, + } + + td = append(td, sitd) + } + + tdBytes, err := json.Marshal(td) + if err != nil { + return err + } + return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes))) +} + +func dedupTagToImage(tagToImage map[name.Tag]v1.Image) map[v1.Image][]string { + imageToTags := make(map[v1.Image][]string) + + for tag, img := range tagToImage { + if tags, ok := imageToTags[img]; ok { + imageToTags[img] = append(tags, tag.String()) + } else { + imageToTags[img] = []string{tag.String()} + } + } + + return imageToTags +} + +// write a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE 
new file mode 100644 index 00000000000..abaf1e45f2a --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 00000000000..55ce4396034 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,34 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // The options used to create the Default logger. These are + // read only when the Default logger is created, so set them + // as soon as the process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Return a logger that is held globally. This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. 
+func Default() Logger { + protect.Do(func() { + def = New(DefaultOptions) + }) + + return def +} + +// A short alias for Default() +func L() Logger { + return Default() +} diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go new file mode 100644 index 00000000000..2aaa1f895d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/int.go @@ -0,0 +1,507 @@ +package hclog + +import ( + "bufio" + "bytes" + "encoding" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } +) + +// Given the options (nil for defaults), create a new Logger +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = os.Stderr + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mtx := opts.Mutex + if mtx == nil { + mtx = new(sync.Mutex) + } + + ret := &intLogger{ + m: mtx, + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + w: bufio.NewWriter(output), + level: new(int32), + } + if opts.TimeFormat != "" { + ret.timeFormat = opts.TimeFormat + } + atomic.StoreInt32(ret.level, int32(level)) + return ret +} + +// The internal logger implementation. Internal in that it is defined entirely +// by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // this is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + m *sync.Mutex + w *bufio.Writer + level *int32 + + implied []interface{} +} + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// The time format to use for logging. 
This is a version of RFC3339 that +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (z *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(z.level)) { + return + } + + t := time.Now() + + z.m.Lock() + defer z.m.Unlock() + + if z.json { + z.logJson(t, level, msg, args...) + } else { + z.log(t, level, msg, args...) + } + + z.w.Flush() +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + + // Find the last separator. + // + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. 
+ idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + z.w.WriteString(t.Format(z.timeFormat)) + z.w.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + z.w.WriteString(s) + } else { + z.w.WriteString("[?????]") + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + z.w.WriteByte(' ') + z.w.WriteString(trimCallerPath(file)) + z.w.WriteByte(':') + z.w.WriteString(strconv.Itoa(line)) + z.w.WriteByte(':') + } + } + + z.w.WriteByte(' ') + + if z.name != "" { + z.w.WriteString(z.name) + z.w.WriteString(": ") + } + + z.w.WriteString(msg) + + args = append(z.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + z.w.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = z.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + z.w.WriteByte(' ') + z.w.WriteString(args[i].(string)) + z.w.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + z.w.WriteByte('"') + z.w.WriteString(val) + z.w.WriteByte('"') + } else { + z.w.WriteString(val) + } + } + } + + z.w.WriteString("\n") + + if stacktrace != "" { + z.w.WriteString(string(stacktrace)) + } +} + +func (z *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if z.name != "" { + vals["@module"] = z.name + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + + args = append(z.implied, args...) 
+ + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(z.w).Encode(vals) + if err != nil { + panic(err) + } +} + +// Emit the message and args at DEBUG level +func (z *intLogger) Debug(msg string, args ...interface{}) { + z.Log(Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (z *intLogger) Trace(msg string, args ...interface{}) { + z.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (z *intLogger) Info(msg string, args ...interface{}) { + z.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (z *intLogger) Warn(msg string, args ...interface{}) { + z.Log(Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (z *intLogger) Error(msg string, args ...interface{}) { + z.Log(Error, msg, args...) 
+} + +// Indicate that the logger would emit TRACE level logs +func (z *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(z.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (z *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(z.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (z *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(z.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (z *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(z.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (z *intLogger) IsError() bool { + return Level(atomic.LoadInt32(z.level)) <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (z *intLogger) With(args ...interface{}) Logger { + if len(args)%2 != 0 { + panic("With() call requires paired arguments") + } + + var nz intLogger = *z + + result := make(map[string]interface{}, len(z.implied)+len(args)) + keys := make([]string, 0, len(z.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(z.implied); i += 2 { + key := z.implied[i].(string) + keys = append(keys, key) + result[key] = z.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + nz.implied = make([]interface{}, 0, len(z.implied)+len(args)) + for _, k := range keys { + nz.implied = append(nz.implied, k) + nz.implied = append(nz.implied, result[k]) + } + + return &nz +} + +// Create a new sub-Logger that a name decending from the current name. 
+// This is used to create a subsystem specific Logger. +func (z *intLogger) Named(name string) Logger { + var nz intLogger = *z + + if nz.name != "" { + nz.name = nz.name + "." + name + } else { + nz.name = name + } + + return &nz +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (z *intLogger) ResetNamed(name string) Logger { + var nz intLogger = *z + + nz.name = name + + return &nz +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (z *intLogger) SetLevel(level Level) { + atomic.StoreInt32(z.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) +} diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go new file mode 100644 index 00000000000..d98714e0a0b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/log.go @@ -0,0 +1,161 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" + "sync" +) + +var ( + DefaultOutput = os.Stderr + DefaultLevel = Info +) + +type Level int32 + +const ( + // This is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // The most verbose level. Intended to be used for the tracing of actions + // in code, such as function enters/exits, etc. + Trace Level = 1 + + // For programmer lowlevel analysis. + Debug Level = 2 + + // For information about steady state operations. + Info Level = 3 + + // For information about rare but handled events. 
+ Warn Level = 4 + + // For information about unrecoverable events. + Error Level = 5 +) + +// When processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values to be +// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple +// convience type for when formatting is required. +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept "INFO" or "info" + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +// The main Logger interface. All code should code against this interface only. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. 
This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger +} + +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + InferLevels bool +} + +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is supressed + Level Level + + // Where to write the logs to. 
Defaults to os.Stderr if nil + Output io.Writer + + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string +} diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 00000000000..0942361a52d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,47 @@ +package hclog + +import ( + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. +func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(ioutil.Discard, "", log.LstdFlags) +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file 
mode 100644 index 00000000000..8af1a3be4c0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,108 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// A stacktrace gathered by a previous call to log.Stacktrace. If passed +// to a logging function, the stacktrace will be appended. +type CapturedStacktrace string + +// Gather a stacktrace of the current goroutine and return it to be passed +// to a logging function. 
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 00000000000..2bb927fc90c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. 
This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + hl Logger + inferLevels bool +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.hl.Trace(str) + case Debug: + s.hl.Debug(str) + case Info: + s.hl.Info(str) + case Warn: + s.hl.Warn(str) + case Error: + s.hl.Error(str) + default: + s.hl.Info(str) + } + } else { + s.hl.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. 
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. 
Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 00000000000..42de0fc50c3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,973 @@ +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + + hclog "github.com/hashicorp/go-hclog" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. 
+ ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. 
If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. 
+ SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will used. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. 
+ AutoMTLS bool +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. 
+func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. 
+func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. 
+ client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. 
+ { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. 
+ if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs write away + c.logger.Debug("plugin process exited", debugMsgArgs...) 
+ os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. 
+ { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. 
+ if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. 
"+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. 
+func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + + scanner := bufio.NewScanner(r) + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + for scanner.Scan() { + line := scanner.Text() + c.config.Stderr.Write([]byte(line + "\n")) + line = strings.TrimRightFunc(line, unicode.IsSpace) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + l.Debug(line) + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + } + } + } + + if err := scanner.Err(); err != nil { + l.Error("reading plugin stderr", "error", err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 00000000000..d22c566ed50 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. 
+// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 00000000000..22a7baa6a0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 00000000000..1a13780bc11 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/proto" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. 
+type streamer interface { + Send(*proto.ConnInfo) error + Recv() (*proto.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *proto.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *proto.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *proto.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream proto.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Proccess send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. 
+func (s *gRPCBrokerServer) Send(i *proto.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*proto.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client proto.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *proto.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: proto.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *proto.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. 
+func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *proto.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*proto.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. 
+// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *proto.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&proto.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. 
+func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *proto.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. 
+// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *proto.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. 
+ select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000000..e81f6bd60e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,111 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. 
+ brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: proto.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller proto.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &proto.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 00000000000..aa4c38114d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/proto" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. 
It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *proto.Empty) (*proto.Empty, error) { + resp := &proto.Empty{} + + // TODO: figure out why GracefullStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 00000000000..60df4a43dc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,142 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. 
+ TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger +} + +// ServerProtocol impl. +func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + proto.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + proto.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. 
+func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/proto/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/proto/gen.go new file mode 100644 index 00000000000..294ea4313c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/proto/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package proto diff --git a/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_broker.pb.go new file mode 100644 index 00000000000..39522f336a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + 
proto.RegisterType((*ConnInfo)(nil), "proto.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, + 0x53, 0x4a, 0xb1, 0x5c, 0x1c, 0xce, 0xf9, 0x79, 0x79, 0x9e, 0x79, 0x69, 0xf9, 0x42, 0xb2, 0x5c, + 0x5c, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0xf1, 0x99, 0x29, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, + 0xbc, 0x41, 0x9c, 0x50, 0x11, 0xcf, 0x14, 0x21, 0x09, 0x2e, 0xf6, 0xbc, 0xd4, 0x92, 0xf2, 0xfc, + 0xa2, 0x6c, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x18, 0x17, 0x24, 0x93, 0x98, 0x92, 0x52, + 0x94, 0x5a, 0x5c, 0x2c, 0xc1, 0x0c, 0x91, 0x81, 0x72, 0x8d, 0x1c, 0xb9, 0xb8, 0xdc, 0x83, 0x02, + 0x9c, 0x9d, 0xc0, 0x36, 0x0b, 0x19, 0x73, 0x71, 0x07, 0x97, 0x24, 0x16, 0x95, 0x04, 0x97, 0x14, + 0xa5, 0x26, 0xe6, 0x0a, 0xf1, 0x43, 0x9c, 0xa2, 0x07, 0x73, 0x80, 0x14, 0xba, 0x80, 0x06, 0xa3, + 0x01, 0x63, 0x12, 0x1b, 0x58, 0xcc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xda, 0xd5, 0x84, + 0xc4, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/proto.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. 
+type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_controller.pb.go new file mode 100644 index 00000000000..bb780d7fff4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/proto/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_controller.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "proto.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 97 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, 0xec, 0x5c, 0xac, 0xae, 0xb9, 0x05, 0x25, 0x95, 0x46, 0x16, + 0x5c, 0x7c, 0xee, 0x41, 0x01, 0xce, 0xce, 0x70, 0x75, 0x42, 0x6a, 0x5c, 0x1c, 0xc1, 0x19, 0xa5, + 0x25, 0x29, 0xf9, 
0xe5, 0x79, 0x42, 0x3c, 0x10, 0x5d, 0x7a, 0x60, 0xb5, 0x52, 0x28, 0xbc, 0x24, + 0x36, 0x30, 0xc7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x69, 0xa1, 0xad, 0x79, 0x69, 0x00, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/proto.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. 
+type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 00000000000..2996c14c3cb --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for 
hclog consumption. +func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input string) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal([]byte(input), &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 00000000000..88955245877 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returns in PEM format. 
+func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 00000000000..01c45ad7c68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections 
by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. 
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 00000000000..79d9674633a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,58 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. 
+package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. 
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 00000000000..88c999a580d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 00000000000..70ba546bf6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. 
+func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 00000000000..9f7b0180901 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000000..0cfc19e52d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. 
+ // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 00000000000..f30a4b1d387 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. 
+func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. 
You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. 
+func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 00000000000..5bb18dd5db1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type controlServer struct { + server *RPCServer +} + +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. 
+func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + +func (c *controlServer) Quit( + null bool, response *struct{}) error { + // End the server + c.server.done() + + // Always return true + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it. 
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 00000000000..fc9f05a9fbc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,432 @@ +package plugin + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. 
+ ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. 
In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. 
+ protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to os.Stderr. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. 
These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. 
+ if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Accept connections and wait for completion + go server.Serve(listener) + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + 
listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. +type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 00000000000..033079ea0fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. 
This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 00000000000..1d547aaaab3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 00000000000..8a8d11560a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/proto" + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. 
+func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. 
The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: proto.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 
00000000000..f0e5c79e181 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. 
Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. 
+ +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 00000000000..be6ebca9c78 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
+func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// LocalAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go new file mode 100644 index 00000000000..4f52938287f --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -0,0 +1,157 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + // ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = fmt.Errorf("i/o deadline reached") + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when 
we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. 
+ flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go new file mode 100644 index 00000000000..18a078c8ad9 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -0,0 +1,98 @@ +package yamux + +import ( + "fmt" + "io" + "log" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. + AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. 
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+ // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+ // which we will suspect a problem with the underlying connection and
+ // close it. This is only applied to writes, where there's generally
+ // an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // LogOutput is used to control the log destination. Either Logger or
+ // LogOutput can be set, not both.
+ LogOutput io.Writer
+
+ // Logger is used to pass in the logger to be used. Either Logger or
+ // LogOutput can be set, not both.
+ Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ if config.LogOutput != nil && config.Logger != nil {
+ return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+ } else if config.LogOutput == nil && config.Logger == nil {
+ return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection. 
+// There must be at most one server-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000000..a80ddec35ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,653 @@
+package yamux
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+ // not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+ // accepting further connections. Must be first for alignment.
+ localGoAway int32
+
+ // nextStreamID is the next stream we should
+ // send. This depends if we are a client/server. 
+ nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. 
+ sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. 
+func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. 
+func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. 
+func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. 
Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. +func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write 
body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + 
if hdr.MsgType() == typeData && hdr.Length() > 0 {
+ s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+ if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+ return nil
+ }
+ } else {
+ s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+ }
+ return nil
+ }
+
+ // Check if this is a window update
+ if hdr.MsgType() == typeWindowUpdate {
+ if err := stream.incrSendWindow(hdr, flags); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+ }
+
+ // Read the new data
+ if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+ flags := hdr.Flags()
+ pingID := hdr.Length()
+
+ // Check if this is a query, respond back in a separate context so we
+ // don't interfere with the receiving thread blocking for the write. 
+ if flags&flagSYN == flagSYN {
+ go func() {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagACK, 0, pingID)
+ if err := s.sendNoWait(hdr); err != nil {
+ s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+ }
+ }()
+ return nil
+ }
+
+ // Handle a response
+ s.pingLock.Lock()
+ ch := s.pings[pingID]
+ if ch != nil {
+ delete(s.pings, pingID)
+ close(ch)
+ }
+ s.pingLock.Unlock()
+ return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+ code := hdr.Length()
+ switch code {
+ case goAwayNormal:
+ atomic.SwapInt32(&s.remoteGoAway, 1)
+ case goAwayProtoErr:
+ s.logger.Printf("[ERR] yamux: received protocol error go away")
+ return fmt.Errorf("yamux protocol error")
+ case goAwayInternalErr:
+ s.logger.Printf("[ERR] yamux: received internal error go away")
+ return fmt.Errorf("remote yamux internal error")
+ default:
+ s.logger.Printf("[ERR] yamux: received unexpected go away")
+ return fmt.Errorf("unexpected go away received")
+ }
+ return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+ // Reject immediately if we are doing a go away
+ if atomic.LoadInt32(&s.localGoAway) == 1 {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+
+ // Allocate a new stream
+ stream := newStream(s, id, streamSYNReceived)
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+
+ // Check if stream already exists
+ if _, ok := s.streams[id]; ok {
+ s.logger.Printf("[ERR] yamux: duplicate stream declared")
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return ErrDuplicateStream
+ }
+
+ // Register the stream
+ s.streams[id] = stream
+
+ // Check if we've exceeded the backlog
+ select {
+ case s.acceptCh <- stream:
+ return nil
+ default:
+ // 
Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 00000000000..aa239197398 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,470 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } 
+ + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := 
writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a 
close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) 
incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= length + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. 
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 00000000000..8a73e9249a6 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/krishicks/yaml-patch/LICENSE b/vendor/github.com/krishicks/yaml-patch/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/krishicks/yaml-patch/container.go b/vendor/github.com/krishicks/yaml-patch/container.go new file mode 100644 index 00000000000..bdc22f143de --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/container.go @@ -0,0 +1,167 @@ +package yamlpatch + +import ( + "fmt" + "strconv" + "strings" +) + +// Container is the interface for performing operations on Nodes +type Container interface { + Get(key string) (*Node, error) + Set(key string, val *Node) error + Add(key string, val *Node) error + Remove(key string) error +} + +type nodeMap map[interface{}]*Node + +func (n *nodeMap) Set(key string, val *Node) error { + (*n)[key] = val + return nil +} + +func (n *nodeMap) Add(key string, val *Node) error { + (*n)[key] = val + return nil +} + +func (n *nodeMap) Get(key string) (*Node, error) { + return (*n)[key], nil +} + +func (n *nodeMap) Remove(key string) error { + _, ok := (*n)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*n, key) + return nil +} + +type nodeSlice []*Node + +func (n *nodeSlice) Set(index string, val *Node) error { + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + sz := len(*n) + if i+1 > sz { + sz = i + 1 + } + + ary := make([]*Node, sz) + + cur := *n + + copy(ary, cur) + + if i >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", i) + } + + ary[i] = val + + *n = ary + return nil +} + +func (n *nodeSlice) Add(index string, val *Node) error { + if index == "-" { + *n = append(*n, val) + return nil + } + + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + ary := make([]*Node, len(*n)+1) + + cur := *n + + copy(ary[0:i], cur[0:i]) + ary[i] = val + copy(ary[i+1:], cur[i:]) + + *n = ary + return nil +} + +func (n *nodeSlice) Get(index string) (*Node, error) { + i, err := strconv.Atoi(index) + if err != nil { + return nil, err + } + + if i >= 0 && i <= len(*n)-1 { + return (*n)[i], nil + } + + return nil, 
fmt.Errorf("Unable to access invalid index: %d", i) +} + +func (n *nodeSlice) Remove(index string) error { + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + cur := *n + + if i >= len(cur) { + return fmt.Errorf("Unable to remove invalid index: %d", i) + } + + ary := make([]*Node, len(cur)-1) + + copy(ary[0:i], cur[0:i]) + copy(ary[i:], cur[i+1:]) + + *n = ary + return nil + +} + +func findContainer(c Container, path *OpPath) (Container, string, error) { + parts, key, err := path.Decompose() + if err != nil { + return nil, "", err + } + + foundContainer := c + + for _, part := range parts { + node, err := foundContainer.Get(decodePatchKey(part)) + if err != nil { + return nil, "", err + } + + if node == nil { + return nil, "", fmt.Errorf("path does not exist: %s", path) + } + + foundContainer = node.Container() + } + + return foundContainer, decodePatchKey(key), nil +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/krishicks/yaml-patch/node.go b/vendor/github.com/krishicks/yaml-patch/node.go new file mode 100644 index 00000000000..4837c8a983e --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/node.go @@ -0,0 +1,83 @@ +package yamlpatch + +import "reflect" + +// Node holds a YAML document that has not yet been processed into a NodeMap or +// NodeSlice +type Node struct { + raw *interface{} + container Container +} + +// NewNode returns a new Node. 
It expects a pointer to an interface{} +func NewNode(raw *interface{}) *Node { + return &Node{ + raw: raw, + } +} + +// MarshalYAML implements yaml.Marshaler, and returns the correct interface{} +// to be marshaled +func (n *Node) MarshalYAML() (interface{}, error) { + if n.container != nil { + return n.container, nil + } + + return *n.raw, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler +func (n *Node) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data interface{} + + err := unmarshal(&data) + if err != nil { + return err + } + + n.raw = &data + return nil +} + +// Empty returns whether the raw value is nil +func (n *Node) Empty() bool { + return *n.raw == nil +} + +// Container returns the node as a Container +func (n *Node) Container() Container { + if n.container != nil { + return n.container + } + + switch rt := (*n.raw).(type) { + case []interface{}: + c := make(nodeSlice, len(rt)) + n.container = &c + + for i := range rt { + c[i] = NewNode(&rt[i]) + } + case map[interface{}]interface{}: + c := make(nodeMap, len(rt)) + n.container = &c + + for k := range rt { + v := rt[k] + c[k] = NewNode(&v) + } + } + + return n.container +} + +// Equal compares the values of the raw interfaces that the YAML was +// unmarshaled into +func (n *Node) Equal(other *Node) bool { + return reflect.DeepEqual(*n.raw, *other.raw) +} + +// Value returns the raw value of the node +func (n *Node) Value() interface{} { + return *n.raw +} diff --git a/vendor/github.com/krishicks/yaml-patch/operation.go b/vendor/github.com/krishicks/yaml-patch/operation.go new file mode 100644 index 00000000000..69353c77d6d --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/operation.go @@ -0,0 +1,181 @@ +package yamlpatch + +import ( + "errors" + "fmt" + "strings" +) + +// Op is a type alias +type Op string + +// Ops +const ( + opAdd Op = "add" + opRemove Op = "remove" + opReplace Op = "replace" + opMove Op = "move" + opCopy Op = "copy" + opTest Op = "test" +) + +// OpPath 
is an RFC6902 'pointer' +type OpPath string + +// Decompose returns the pointer's components: +// "/foo" => [], "foo" +// "/foo/1" => ["foo"], "1" +// "/foo/1/bar" => ["foo", "1"], "bar" +func (p *OpPath) Decompose() ([]string, string, error) { + path := string(*p) + + if !strings.HasPrefix(path, "/") { + return nil, "", fmt.Errorf("operation path is missing leading '/': %s", path) + } + + parts := strings.Split(path, "/")[1:] + + return parts[:len(parts)-1], parts[len(parts)-1], nil +} + +// ContainsExtendedSyntax returns whether the OpPath uses the "key=value" +// format, as in "/foo/name=bar", where /foo points at an array that contains +// an object with a key "name" that has a value "bar" +func (p *OpPath) ContainsExtendedSyntax() bool { + return strings.Contains(string(*p), "=") +} + +// String returns the OpPath as a string +func (p *OpPath) String() string { + return string(*p) +} + +// Operation is an RFC6902 'Operation' +// https://tools.ietf.org/html/rfc6902#section-4 +type Operation struct { + Op Op `yaml:"op,omitempty"` + Path OpPath `yaml:"path,omitempty"` + From OpPath `yaml:"from,omitempty"` + Value *Node `yaml:"value,omitempty"` +} + +// Perform executes the operation on the given container +func (o *Operation) Perform(c Container) error { + var err error + + switch o.Op { + case opAdd: + err = tryAdd(c, o) + case opRemove: + err = tryRemove(c, o) + case opReplace: + err = tryReplace(c, o) + case opMove: + err = tryMove(c, o) + case opCopy: + err = tryCopy(c, o) + case opTest: + err = tryTest(c, o) + default: + err = fmt.Errorf("Unexpected op: %s", o.Op) + } + + return err +} + +func tryAdd(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch add operation does not apply: doc is missing path: %s", op.Path) + } + + return con.Add(key, op.Value) +} + +func tryRemove(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + 
return fmt.Errorf("yamlpatch remove operation does not apply: doc is missing path: %s", op.Path) + } + + return con.Remove(key) +} + +func tryReplace(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch replace operation does not apply: doc is missing path: %s", op.Path) + } + + val, err := con.Get(key) + if val == nil || err != nil { + return fmt.Errorf("yamlpatch replace operation does not apply: doc is missing key: %s", op.Path) + } + + return con.Set(key, op.Value) +} + +func tryMove(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.From) + if err != nil { + return fmt.Errorf("yamlpatch move operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + err = con.Remove(key) + if err != nil { + return err + } + + con, key, err = findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch move operation does not apply: doc is missing destination path: %s", op.Path) + } + + return con.Set(key, val) +} + +func tryCopy(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.From) + if err != nil { + return fmt.Errorf("copy operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + con, key, err = findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s", op.Path) + } + + return con.Set(key, val) +} + +func tryTest(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("test operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + if op.Value.Empty() && val == nil { + return nil + } + + if op.Value.Equal(val) { + return nil + } + + return 
errors.New("test failed") +} diff --git a/vendor/github.com/krishicks/yaml-patch/patch.go b/vendor/github.com/krishicks/yaml-patch/patch.go new file mode 100644 index 00000000000..910f39eb952 --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/patch.go @@ -0,0 +1,60 @@ +package yamlpatch + +import ( + "fmt" + + yaml "gopkg.in/yaml.v2" +) + +// Patch is an ordered collection of operations. +type Patch []Operation + +// DecodePatch decodes the passed YAML document as if it were an RFC 6902 patch +func DecodePatch(bs []byte) (Patch, error) { + var p Patch + + err := yaml.Unmarshal(bs, &p) + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply returns a YAML document that has been mutated per the patch +func (p Patch) Apply(doc []byte) ([]byte, error) { + var iface interface{} + err := yaml.Unmarshal(doc, &iface) + if err != nil { + return nil, fmt.Errorf("failed unmarshaling doc: %s\n\n%s", string(doc), err) + } + + var c Container + c = NewNode(&iface).Container() + + for _, op := range p { + pathfinder := NewPathFinder(c) + if op.Path.ContainsExtendedSyntax() { + paths := pathfinder.Find(string(op.Path)) + if paths == nil { + return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + } + + for _, path := range paths { + newOp := op + newOp.Path = OpPath(path) + err = newOp.Perform(c) + if err != nil { + return nil, err + } + } + } else { + err = op.Perform(c) + if err != nil { + return nil, err + } + } + } + + return yaml.Marshal(c) +} diff --git a/vendor/github.com/krishicks/yaml-patch/pathfinder.go b/vendor/github.com/krishicks/yaml-patch/pathfinder.go new file mode 100644 index 00000000000..06cfb133347 --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/pathfinder.go @@ -0,0 +1,109 @@ +package yamlpatch + +import ( + "fmt" + "strings" +) + +// PathFinder can be used to find RFC6902-standard paths given non-standard +// (key=value) pointer syntax +type PathFinder struct { + root Container +} + +// NewPathFinder takes an 
interface that represents a YAML document and returns +// a new PathFinder +func NewPathFinder(container Container) *PathFinder { + return &PathFinder{ + root: container, + } +} + +// Find expands the given path into all matching paths, returning the canonical +// versions of those matching paths +func (p *PathFinder) Find(path string) []string { + parts := strings.Split(path, "/") + + if parts[1] == "" { + return []string{"/"} + } + + routes := map[string]Container{ + "": p.root, + } + + for _, part := range parts[1:] { + routes = find(decodePatchKey(part), routes) + } + + var paths []string + for k := range routes { + paths = append(paths, k) + } + + return paths +} + +func find(part string, routes map[string]Container) map[string]Container { + matches := map[string]Container{} + + for prefix, container := range routes { + if part == "-" { + for k := range routes { + matches[fmt.Sprintf("%s/-", k)] = routes[k] + } + return matches + } + + if kv := strings.Split(part, "="); len(kv) == 2 { + if newMatches := findAll(prefix, kv[0], kv[1], container); len(newMatches) > 0 { + matches = newMatches + } + continue + } + + if node, err := container.Get(part); err == nil { + path := fmt.Sprintf("%s/%s", prefix, part) + if node == nil { + matches[path] = container + } else { + matches[path] = node.Container() + } + } + } + + return matches +} + +func findAll(prefix, findKey, findValue string, container Container) map[string]Container { + if container == nil { + return nil + } + + if v, err := container.Get(findKey); err == nil && v != nil { + if vs, ok := v.Value().(string); ok && vs == findValue { + return map[string]Container{ + prefix: container, + } + } + } + + matches := map[string]Container{} + + switch it := container.(type) { + case *nodeMap: + for k, v := range *it { + for route, match := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { + matches[route] = match + } + } + case *nodeSlice: + for i, v := range *it { + for route, 
match := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, findValue, v.Container()) { + matches[route] = match + } + } + } + + return matches +} diff --git a/vendor/github.com/krishicks/yaml-patch/placeholder_wrapper.go b/vendor/github.com/krishicks/yaml-patch/placeholder_wrapper.go new file mode 100644 index 00000000000..cbcc22a8420 --- /dev/null +++ b/vendor/github.com/krishicks/yaml-patch/placeholder_wrapper.go @@ -0,0 +1,51 @@ +package yamlpatch + +import ( + "fmt" + "regexp" +) + +// PlaceholderWrapper can be used to wrap placeholders that make YAML invalid +// in single quotes to make otherwise valid YAML +type PlaceholderWrapper struct { + LeftSide string + RightSide string + unwrappedRegex *regexp.Regexp + wrappedRegex *regexp.Regexp +} + +// NewPlaceholderWrapper returns a new PlaceholderWrapper which knows how to +// wrap and unwrap the provided left and right sides of a placeholder, e.g. {{ +// and }} +func NewPlaceholderWrapper(left, right string) *PlaceholderWrapper { + escapedLeft := regexp.QuoteMeta(left) + escapedRight := regexp.QuoteMeta(right) + unwrappedRegex := regexp.MustCompile(`\s` + escapedLeft + `([^` + escapedRight + `]+)` + escapedRight) + wrappedRegex := regexp.MustCompile(`\s'` + escapedLeft + `([^` + escapedRight + `]+)` + escapedRight + `'`) + + return &PlaceholderWrapper{ + LeftSide: left, + RightSide: right, + unwrappedRegex: unwrappedRegex, + wrappedRegex: wrappedRegex, + } +} + +// Wrap the placeholder in single quotes to make it valid YAML +func (w *PlaceholderWrapper) Wrap(input []byte) []byte { + if !w.unwrappedRegex.Match(input) { + return input + } + + return w.unwrappedRegex.ReplaceAll(input, []byte(fmt.Sprintf(` '%s$1%s'`, w.LeftSide, w.RightSide))) +} + +// Unwrap the single quotes from the placeholder to make it invalid YAML +// (again) +func (w *PlaceholderWrapper) Unwrap(input []byte) []byte { + if !w.wrappedRegex.Match(input) { + return input + } + + return w.wrappedRegex.ReplaceAll(input, []byte(fmt.Sprintf(` 
%s$1%s`, w.LeftSide, w.RightSide))) +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/LICENSE b/vendor/github.com/lucasb-eyer/go-colorful/LICENSE new file mode 100644 index 00000000000..4e402a00e52 --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2013 Lucas Beyer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go b/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go new file mode 100644 index 00000000000..2e2e49e19fc --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go @@ -0,0 +1,55 @@ +// Various ways to generate single random colors + +package colorful + +import ( + "math/rand" +) + +// Creates a random dark, "warm" color through a restricted HSV space. +func FastWarmColor() Color { + return Hsv( + rand.Float64()*360.0, + 0.5+rand.Float64()*0.3, + 0.3+rand.Float64()*0.3) +} + +// Creates a random dark, "warm" color through restricted HCL space. 
+// This is slower than FastWarmColor but will likely give you colors which have +// the same "warmness" if you run it many times. +func WarmColor() (c Color) { + for c = randomWarm(); !c.IsValid(); c = randomWarm() { + } + return +} + +func randomWarm() Color { + return Hcl( + rand.Float64()*360.0, + 0.1+rand.Float64()*0.3, + 0.2+rand.Float64()*0.3) +} + +// Creates a random bright, "pimpy" color through a restricted HSV space. +func FastHappyColor() Color { + return Hsv( + rand.Float64()*360.0, + 0.7+rand.Float64()*0.3, + 0.6+rand.Float64()*0.3) +} + +// Creates a random bright, "pimpy" color through restricted HCL space. +// This is slower than FastHappyColor but will likely give you colors which +// have the same "brightness" if you run it many times. +func HappyColor() (c Color) { + for c = randomPimp(); !c.IsValid(); c = randomPimp() { + } + return +} + +func randomPimp() Color { + return Hcl( + rand.Float64()*360.0, + 0.5+rand.Float64()*0.3, + 0.5+rand.Float64()*0.3) +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/colors.go b/vendor/github.com/lucasb-eyer/go-colorful/colors.go new file mode 100644 index 00000000000..febf94c7afd --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/colors.go @@ -0,0 +1,819 @@ +// The colorful package provides all kinds of functions for working with colors. +package colorful + +import ( + "fmt" + "image/color" + "math" +) + +// A color is stored internally using sRGB (standard RGB) values in the range 0-1 +type Color struct { + R, G, B float64 +} + +// Implement the Go color.Color interface. 
+func (col Color) RGBA() (r, g, b, a uint32) { + r = uint32(col.R*65535.0 + 0.5) + g = uint32(col.G*65535.0 + 0.5) + b = uint32(col.B*65535.0 + 0.5) + a = 0xFFFF + return +} + +// Constructs a colorful.Color from something implementing color.Color +func MakeColor(col color.Color) (Color, bool) { + r, g, b, a := col.RGBA() + if a == 0 { + return Color{0, 0, 0}, false + } + + // Since color.Color is alpha pre-multiplied, we need to divide the + // RGB values by alpha again in order to get back the original RGB. + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + + return Color{float64(r) / 65535.0, float64(g) / 65535.0, float64(b) / 65535.0}, true +} + +// Might come in handy sometimes to reduce boilerplate code. +func (col Color) RGB255() (r, g, b uint8) { + r = uint8(col.R*255.0 + 0.5) + g = uint8(col.G*255.0 + 0.5) + b = uint8(col.B*255.0 + 0.5) + return +} + +// This is the tolerance used when comparing colors using AlmostEqualRgb. +const Delta = 1.0 / 255.0 + +// This is the default reference white point. +var D65 = [3]float64{0.95047, 1.00000, 1.08883} + +// And another one. +var D50 = [3]float64{0.96422, 1.00000, 0.82521} + +// Checks whether the color exists in RGB space, i.e. all values are in [0..1] +func (c Color) IsValid() bool { + return 0.0 <= c.R && c.R <= 1.0 && + 0.0 <= c.G && c.G <= 1.0 && + 0.0 <= c.B && c.B <= 1.0 +} + +func clamp01(v float64) float64 { + return math.Max(0.0, math.Min(v, 1.0)) +} + +// Returns Clamps the color into valid range, clamping each value to [0..1] +// If the color is valid already, this is a no-op. +func (c Color) Clamped() Color { + return Color{clamp01(c.R), clamp01(c.G), clamp01(c.B)} +} + +func sq(v float64) float64 { + return v * v +} + +func cub(v float64) float64 { + return v * v * v +} + +// DistanceRgb computes the distance between two colors in RGB space. +// This is not a good measure! Rather do it in Lab space. 
+func (c1 Color) DistanceRgb(c2 Color) float64 { + return math.Sqrt(sq(c1.R-c2.R) + sq(c1.G-c2.G) + sq(c1.B-c2.B)) +} + +// Check for equality between colors within the tolerance Delta (1/255). +func (c1 Color) AlmostEqualRgb(c2 Color) bool { + return math.Abs(c1.R-c2.R)+ + math.Abs(c1.G-c2.G)+ + math.Abs(c1.B-c2.B) < 3.0*Delta +} + +// You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl. +func (c1 Color) BlendRgb(c2 Color, t float64) Color { + return Color{c1.R + t*(c2.R-c1.R), + c1.G + t*(c2.G-c1.G), + c1.B + t*(c2.B-c1.B)} +} + +// Utility used by Hxx color-spaces for interpolating between two angles in [0,360]. +func interp_angle(a0, a1, t float64) float64 { + // Based on the answer here: http://stackoverflow.com/a/14498790/2366315 + // With potential proof that it works here: http://math.stackexchange.com/a/2144499 + delta := math.Mod(math.Mod(a1-a0, 360.0)+540, 360.0) - 180.0 + return math.Mod(a0+t*delta+360.0, 360.0) +} + +/// HSV /// +/////////// +// From http://en.wikipedia.org/wiki/HSL_and_HSV +// Note that h is in [0..360] and s,v in [0..1] + +// Hsv returns the Hue [0..360], Saturation and Value [0..1] of the color. +func (col Color) Hsv() (h, s, v float64) { + min := math.Min(math.Min(col.R, col.G), col.B) + v = math.Max(math.Max(col.R, col.G), col.B) + C := v - min + + s = 0.0 + if v != 0.0 { + s = C / v + } + + h = 0.0 // We use 0 instead of undefined as in wp. 
+ if min != v { + if v == col.R { + h = math.Mod((col.G-col.B)/C, 6.0) + } + if v == col.G { + h = (col.B-col.R)/C + 2.0 + } + if v == col.B { + h = (col.R-col.G)/C + 4.0 + } + h *= 60.0 + if h < 0.0 { + h += 360.0 + } + } + return +} + +// Hsv creates a new Color given a Hue in [0..360], a Saturation and a Value in [0..1] +func Hsv(H, S, V float64) Color { + Hp := H / 60.0 + C := V * S + X := C * (1.0 - math.Abs(math.Mod(Hp, 2.0)-1.0)) + + m := V - C + r, g, b := 0.0, 0.0, 0.0 + + switch { + case 0.0 <= Hp && Hp < 1.0: + r = C + g = X + case 1.0 <= Hp && Hp < 2.0: + r = X + g = C + case 2.0 <= Hp && Hp < 3.0: + g = C + b = X + case 3.0 <= Hp && Hp < 4.0: + g = X + b = C + case 4.0 <= Hp && Hp < 5.0: + r = X + b = C + case 5.0 <= Hp && Hp < 6.0: + r = C + b = X + } + + return Color{m + r, m + g, m + b} +} + +// You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl. +func (c1 Color) BlendHsv(c2 Color, t float64) Color { + h1, s1, v1 := c1.Hsv() + h2, s2, v2 := c2.Hsv() + + // We know that h are both in [0..360] + return Hsv(interp_angle(h1, h2, t), s1+t*(s2-s1), v1+t*(v2-v1)) +} + +/// HSL /// +/////////// + +// Hsl returns the Hue [0..360], Saturation [0..1], and Luminance (lightness) [0..1] of the color. 
+func (col Color) Hsl() (h, s, l float64) { + min := math.Min(math.Min(col.R, col.G), col.B) + max := math.Max(math.Max(col.R, col.G), col.B) + + l = (max + min) / 2 + + if min == max { + s = 0 + h = 0 + } else { + if l < 0.5 { + s = (max - min) / (max + min) + } else { + s = (max - min) / (2.0 - max - min) + } + + if max == col.R { + h = (col.G - col.B) / (max - min) + } else if max == col.G { + h = 2.0 + (col.B-col.R)/(max-min) + } else { + h = 4.0 + (col.R-col.G)/(max-min) + } + + h *= 60 + + if h < 0 { + h += 360 + } + } + + return +} + +// Hsl creates a new Color given a Hue in [0..360], a Saturation [0..1], and a Luminance (lightness) in [0..1] +func Hsl(h, s, l float64) Color { + if s == 0 { + return Color{l, l, l} + } + + var r, g, b float64 + var t1 float64 + var t2 float64 + var tr float64 + var tg float64 + var tb float64 + + if l < 0.5 { + t1 = l * (1.0 + s) + } else { + t1 = l + s - l*s + } + + t2 = 2*l - t1 + h = h / 360 + tr = h + 1.0/3.0 + tg = h + tb = h - 1.0/3.0 + + if tr < 0 { + tr++ + } + if tr > 1 { + tr-- + } + if tg < 0 { + tg++ + } + if tg > 1 { + tg-- + } + if tb < 0 { + tb++ + } + if tb > 1 { + tb-- + } + + // Red + if 6*tr < 1 { + r = t2 + (t1-t2)*6*tr + } else if 2*tr < 1 { + r = t1 + } else if 3*tr < 2 { + r = t2 + (t1-t2)*(2.0/3.0-tr)*6 + } else { + r = t2 + } + + // Green + if 6*tg < 1 { + g = t2 + (t1-t2)*6*tg + } else if 2*tg < 1 { + g = t1 + } else if 3*tg < 2 { + g = t2 + (t1-t2)*(2.0/3.0-tg)*6 + } else { + g = t2 + } + + // Blue + if 6*tb < 1 { + b = t2 + (t1-t2)*6*tb + } else if 2*tb < 1 { + b = t1 + } else if 3*tb < 2 { + b = t2 + (t1-t2)*(2.0/3.0-tb)*6 + } else { + b = t2 + } + + return Color{r, g, b} +} + +/// Hex /// +/////////// + +// Hex returns the hex "html" representation of the color, as in #ff0080. 
+func (col Color) Hex() string { + // Add 0.5 for rounding + return fmt.Sprintf("#%02x%02x%02x", uint8(col.R*255.0+0.5), uint8(col.G*255.0+0.5), uint8(col.B*255.0+0.5)) +} + +// Hex parses a "html" hex color-string, either in the 3 "#f0c" or 6 "#ff1034" digits form. +func Hex(scol string) (Color, error) { + format := "#%02x%02x%02x" + factor := 1.0 / 255.0 + if len(scol) == 4 { + format = "#%1x%1x%1x" + factor = 1.0 / 15.0 + } + + var r, g, b uint8 + n, err := fmt.Sscanf(scol, format, &r, &g, &b) + if err != nil { + return Color{}, err + } + if n != 3 { + return Color{}, fmt.Errorf("color: %v is not a hex-color", scol) + } + + return Color{float64(r) * factor, float64(g) * factor, float64(b) * factor}, nil +} + +/// Linear /// +////////////// +// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/ +// http://www.brucelindbloom.com/Eqn_RGB_to_XYZ.html + +func linearize(v float64) float64 { + if v <= 0.04045 { + return v / 12.92 + } + return math.Pow((v+0.055)/1.055, 2.4) +} + +// LinearRgb converts the color into the linear RGB space (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/). +func (col Color) LinearRgb() (r, g, b float64) { + r = linearize(col.R) + g = linearize(col.G) + b = linearize(col.B) + return +} + +// A much faster and still quite precise linearization using a 6th-order Taylor approximation. +// See the accompanying Jupyter notebook for derivation of the constants. +func linearize_fast(v float64) float64 { + v1 := v - 0.5 + v2 := v1 * v1 + v3 := v2 * v1 + v4 := v2 * v2 + //v5 := v3*v2 + return -0.248750514614486 + 0.925583310193438*v + 1.16740237321695*v2 + 0.280457026598666*v3 - 0.0757991963780179*v4 //+ 0.0437040411548932*v5 +} + +// FastLinearRgb is much faster than and almost as accurate as LinearRgb. +// BUT it is important to NOTE that they only produce good results for valid colors r,g,b in [0,1]. 
+func (col Color) FastLinearRgb() (r, g, b float64) { + r = linearize_fast(col.R) + g = linearize_fast(col.G) + b = linearize_fast(col.B) + return +} + +func delinearize(v float64) float64 { + if v <= 0.0031308 { + return 12.92 * v + } + return 1.055*math.Pow(v, 1.0/2.4) - 0.055 +} + +// LinearRgb creates an sRGB color out of the given linear RGB color (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/). +func LinearRgb(r, g, b float64) Color { + return Color{delinearize(r), delinearize(g), delinearize(b)} +} + +func delinearize_fast(v float64) float64 { + // This function (fractional root) is much harder to linearize, so we need to split. + if v > 0.2 { + v1 := v - 0.6 + v2 := v1 * v1 + v3 := v2 * v1 + v4 := v2 * v2 + v5 := v3 * v2 + return 0.442430344268235 + 0.592178981271708*v - 0.287864782562636*v2 + 0.253214392068985*v3 - 0.272557158129811*v4 + 0.325554383321718*v5 + } else if v > 0.03 { + v1 := v - 0.115 + v2 := v1 * v1 + v3 := v2 * v1 + v4 := v2 * v2 + v5 := v3 * v2 + return 0.194915592891669 + 1.55227076330229*v - 3.93691860257828*v2 + 18.0679839248761*v3 - 101.468750302746*v4 + 632.341487393927*v5 + } else { + v1 := v - 0.015 + v2 := v1 * v1 + v3 := v2 * v1 + v4 := v2 * v2 + v5 := v3 * v2 + // You can clearly see from the involved constants that the low-end is highly nonlinear. + return 0.0519565234928877 + 5.09316778537561*v - 99.0338180489702*v2 + 3484.52322764895*v3 - 150028.083412663*v4 + 7168008.42971613*v5 + } +} + +// FastLinearRgb is much faster than and almost as accurate as LinearRgb. +// BUT it is important to NOTE that they only produce good results for valid inputs r,g,b in [0,1]. +func FastLinearRgb(r, g, b float64) Color { + return Color{delinearize_fast(r), delinearize_fast(g), delinearize_fast(b)} +} + +// XyzToLinearRgb converts from CIE XYZ-space to Linear RGB space. 
+func XyzToLinearRgb(x, y, z float64) (r, g, b float64) { + r = 3.2404542*x - 1.5371385*y - 0.4985314*z + g = -0.9692660*x + 1.8760108*y + 0.0415560*z + b = 0.0556434*x - 0.2040259*y + 1.0572252*z + return +} + +func LinearRgbToXyz(r, g, b float64) (x, y, z float64) { + x = 0.4124564*r + 0.3575761*g + 0.1804375*b + y = 0.2126729*r + 0.7151522*g + 0.0721750*b + z = 0.0193339*r + 0.1191920*g + 0.9503041*b + return +} + +/// XYZ /// +/////////// +// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/ + +func (col Color) Xyz() (x, y, z float64) { + return LinearRgbToXyz(col.LinearRgb()) +} + +func Xyz(x, y, z float64) Color { + return LinearRgb(XyzToLinearRgb(x, y, z)) +} + +/// xyY /// +/////////// +// http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html + +// Well, the name is bad, since it's xyY but Golang needs me to start with a +// capital letter to make the method public. +func XyzToXyy(X, Y, Z float64) (x, y, Yout float64) { + return XyzToXyyWhiteRef(X, Y, Z, D65) +} + +func XyzToXyyWhiteRef(X, Y, Z float64, wref [3]float64) (x, y, Yout float64) { + Yout = Y + N := X + Y + Z + if math.Abs(N) < 1e-14 { + // When we have black, Bruce Lindbloom recommends to use + // the reference white's chromacity for x and y. + x = wref[0] / (wref[0] + wref[1] + wref[2]) + y = wref[1] / (wref[0] + wref[1] + wref[2]) + } else { + x = X / N + y = Y / N + } + return +} + +func XyyToXyz(x, y, Y float64) (X, Yout, Z float64) { + Yout = Y + + if -1e-14 < y && y < 1e-14 { + X = 0.0 + Z = 0.0 + } else { + X = Y / y * x + Z = Y / y * (1.0 - x - y) + } + + return +} + +// Converts the given color to CIE xyY space using D65 as reference white. +// (Note that the reference white is only used for black input.) +// x, y and Y are in [0..1] +func (col Color) Xyy() (x, y, Y float64) { + return XyzToXyy(col.Xyz()) +} + +// Converts the given color to CIE xyY space, taking into account +// a given reference white. (i.e. 
the monitor's white) +// (Note that the reference white is only used for black input.) +// x, y and Y are in [0..1] +func (col Color) XyyWhiteRef(wref [3]float64) (x, y, Y float64) { + X, Y2, Z := col.Xyz() + return XyzToXyyWhiteRef(X, Y2, Z, wref) +} + +// Generates a color by using data given in CIE xyY space. +// x, y and Y are in [0..1] +func Xyy(x, y, Y float64) Color { + return Xyz(XyyToXyz(x, y, Y)) +} + +/// L*a*b* /// +////////////// +// http://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions +// For L*a*b*, we need to L*a*b*<->XYZ->RGB and the first one is device dependent. + +func lab_f(t float64) float64 { + if t > 6.0/29.0*6.0/29.0*6.0/29.0 { + return math.Cbrt(t) + } + return t/3.0*29.0/6.0*29.0/6.0 + 4.0/29.0 +} + +func XyzToLab(x, y, z float64) (l, a, b float64) { + // Use D65 white as reference point by default. + // http://www.fredmiranda.com/forum/topic/1035332 + // http://en.wikipedia.org/wiki/Standard_illuminant + return XyzToLabWhiteRef(x, y, z, D65) +} + +func XyzToLabWhiteRef(x, y, z float64, wref [3]float64) (l, a, b float64) { + fy := lab_f(y / wref[1]) + l = 1.16*fy - 0.16 + a = 5.0 * (lab_f(x/wref[0]) - fy) + b = 2.0 * (fy - lab_f(z/wref[2])) + return +} + +func lab_finv(t float64) float64 { + if t > 6.0/29.0 { + return t * t * t + } + return 3.0 * 6.0 / 29.0 * 6.0 / 29.0 * (t - 4.0/29.0) +} + +func LabToXyz(l, a, b float64) (x, y, z float64) { + // D65 white (see above). + return LabToXyzWhiteRef(l, a, b, D65) +} + +func LabToXyzWhiteRef(l, a, b float64, wref [3]float64) (x, y, z float64) { + l2 := (l + 0.16) / 1.16 + x = wref[0] * lab_finv(l2+a/5.0) + y = wref[1] * lab_finv(l2) + z = wref[2] * lab_finv(l2-b/2.0) + return +} + +// Converts the given color to CIE L*a*b* space using D65 as reference white. +func (col Color) Lab() (l, a, b float64) { + return XyzToLab(col.Xyz()) +} + +// Converts the given color to CIE L*a*b* space, taking into account +// a given reference white. (i.e. 
the monitor's white) +func (col Color) LabWhiteRef(wref [3]float64) (l, a, b float64) { + x, y, z := col.Xyz() + return XyzToLabWhiteRef(x, y, z, wref) +} + +// Generates a color by using data given in CIE L*a*b* space using D65 as reference white. +// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding +// valid RGB values, check the FAQ in the README if you're unsure. +func Lab(l, a, b float64) Color { + return Xyz(LabToXyz(l, a, b)) +} + +// Generates a color by using data given in CIE L*a*b* space, taking +// into account a given reference white. (i.e. the monitor's white) +func LabWhiteRef(l, a, b float64, wref [3]float64) Color { + return Xyz(LabToXyzWhiteRef(l, a, b, wref)) +} + +// DistanceLab is a good measure of visual similarity between two colors! +// A result of 0 would mean identical colors, while a result of 1 or higher +// means the colors differ a lot. +func (c1 Color) DistanceLab(c2 Color) float64 { + l1, a1, b1 := c1.Lab() + l2, a2, b2 := c2.Lab() + return math.Sqrt(sq(l1-l2) + sq(a1-a2) + sq(b1-b2)) +} + +// That's actually the same, but I don't want to break code. +func (c1 Color) DistanceCIE76(c2 Color) float64 { + return c1.DistanceLab(c2) +} + +// Uses the CIE94 formula to calculate color distance. More accurate than +// DistanceLab, but also more work. +func (cl Color) DistanceCIE94(cr Color) float64 { + l1, a1, b1 := cl.Lab() + l2, a2, b2 := cr.Lab() + + // NOTE: Since all those formulas expect L,a,b values 100x larger than we + // have them in this library, we either need to adjust all constants + // in the formula, or convert the ranges of L,a,b before, and then + // scale the distances down again. The latter is less error-prone. + l1, a1, b1 = l1*100.0, a1*100.0, b1*100.0 + l2, a2, b2 = l2*100.0, a2*100.0, b2*100.0 + + kl := 1.0 // 2.0 for textiles + kc := 1.0 + kh := 1.0 + k1 := 0.045 // 0.048 for textiles + k2 := 0.015 // 0.014 for textiles. 
+ + deltaL := l1 - l2 + c1 := math.Sqrt(sq(a1) + sq(b1)) + c2 := math.Sqrt(sq(a2) + sq(b2)) + deltaCab := c1 - c2 + + // Not taking Sqrt here for stability, and it's unnecessary. + deltaHab2 := sq(a1-a2) + sq(b1-b2) - sq(deltaCab) + sl := 1.0 + sc := 1.0 + k1*c1 + sh := 1.0 + k2*c1 + + vL2 := sq(deltaL / (kl * sl)) + vC2 := sq(deltaCab / (kc * sc)) + vH2 := deltaHab2 / sq(kh*sh) + + return math.Sqrt(vL2+vC2+vH2) * 0.01 // See above. +} + +// BlendLab blends two colors in the L*a*b* color-space, which should result in a smoother blend. +// t == 0 results in c1, t == 1 results in c2 +func (c1 Color) BlendLab(c2 Color, t float64) Color { + l1, a1, b1 := c1.Lab() + l2, a2, b2 := c2.Lab() + return Lab(l1+t*(l2-l1), + a1+t*(a2-a1), + b1+t*(b2-b1)) +} + +/// L*u*v* /// +////////////// +// http://en.wikipedia.org/wiki/CIELUV#XYZ_.E2.86.92_CIELUV_and_CIELUV_.E2.86.92_XYZ_conversions +// For L*u*v*, we need to L*u*v*<->XYZ<->RGB and the first one is device dependent. + +func XyzToLuv(x, y, z float64) (l, a, b float64) { + // Use D65 white as reference point by default. + // http://www.fredmiranda.com/forum/topic/1035332 + // http://en.wikipedia.org/wiki/Standard_illuminant + return XyzToLuvWhiteRef(x, y, z, D65) +} + +func XyzToLuvWhiteRef(x, y, z float64, wref [3]float64) (l, u, v float64) { + if y/wref[1] <= 6.0/29.0*6.0/29.0*6.0/29.0 { + l = y / wref[1] * 29.0 / 3.0 * 29.0 / 3.0 * 29.0 / 3.0 + } else { + l = 1.16*math.Cbrt(y/wref[1]) - 0.16 + } + ubis, vbis := xyz_to_uv(x, y, z) + un, vn := xyz_to_uv(wref[0], wref[1], wref[2]) + u = 13.0 * l * (ubis - un) + v = 13.0 * l * (vbis - vn) + return +} + +// For this part, we do as R's graphics.hcl does, not as wikipedia does. +// Or is it the same? +func xyz_to_uv(x, y, z float64) (u, v float64) { + denom := x + 15.0*y + 3.0*z + if denom == 0.0 { + u, v = 0.0, 0.0 + } else { + u = 4.0 * x / denom + v = 9.0 * y / denom + } + return +} + +func LuvToXyz(l, u, v float64) (x, y, z float64) { + // D65 white (see above). 
+ return LuvToXyzWhiteRef(l, u, v, D65) +} + +func LuvToXyzWhiteRef(l, u, v float64, wref [3]float64) (x, y, z float64) { + //y = wref[1] * lab_finv((l + 0.16) / 1.16) + if l <= 0.08 { + y = wref[1] * l * 100.0 * 3.0 / 29.0 * 3.0 / 29.0 * 3.0 / 29.0 + } else { + y = wref[1] * cub((l+0.16)/1.16) + } + un, vn := xyz_to_uv(wref[0], wref[1], wref[2]) + if l != 0.0 { + ubis := u/(13.0*l) + un + vbis := v/(13.0*l) + vn + x = y * 9.0 * ubis / (4.0 * vbis) + z = y * (12.0 - 3.0*ubis - 20.0*vbis) / (4.0 * vbis) + } else { + x, y = 0.0, 0.0 + } + return +} + +// Converts the given color to CIE L*u*v* space using D65 as reference white. +// L* is in [0..1] and both u* and v* are in about [-1..1] +func (col Color) Luv() (l, u, v float64) { + return XyzToLuv(col.Xyz()) +} + +// Converts the given color to CIE L*u*v* space, taking into account +// a given reference white. (i.e. the monitor's white) +// L* is in [0..1] and both u* and v* are in about [-1..1] +func (col Color) LuvWhiteRef(wref [3]float64) (l, u, v float64) { + x, y, z := col.Xyz() + return XyzToLuvWhiteRef(x, y, z, wref) +} + +// Generates a color by using data given in CIE L*u*v* space using D65 as reference white. +// L* is in [0..1] and both u* and v* are in about [-1..1] +// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding +// valid RGB values, check the FAQ in the README if you're unsure. +func Luv(l, u, v float64) Color { + return Xyz(LuvToXyz(l, u, v)) +} + +// Generates a color by using data given in CIE L*u*v* space, taking +// into account a given reference white. (i.e. the monitor's white) +// L* is in [0..1] and both u* and v* are in about [-1..1] +func LuvWhiteRef(l, u, v float64, wref [3]float64) Color { + return Xyz(LuvToXyzWhiteRef(l, u, v, wref)) +} + +// DistanceLuv is a good measure of visual similarity between two colors! +// A result of 0 would mean identical colors, while a result of 1 or higher +// means the colors differ a lot. 
+func (c1 Color) DistanceLuv(c2 Color) float64 { + l1, u1, v1 := c1.Luv() + l2, u2, v2 := c2.Luv() + return math.Sqrt(sq(l1-l2) + sq(u1-u2) + sq(v1-v2)) +} + +// BlendLuv blends two colors in the CIE-L*u*v* color-space, which should result in a smoother blend. +// t == 0 results in c1, t == 1 results in c2 +func (c1 Color) BlendLuv(c2 Color, t float64) Color { + l1, u1, v1 := c1.Luv() + l2, u2, v2 := c2.Luv() + return Luv(l1+t*(l2-l1), + u1+t*(u2-u1), + v1+t*(v2-v1)) +} + +/// HCL /// +/////////// +// HCL is nothing else than L*a*b* in cylindrical coordinates! +// (this was wrong on English wikipedia, I fixed it, let's hope the fix stays.) +// But it is widely popular since it is a "correct HSV" +// http://www.hunterlab.com/appnotes/an09_96a.pdf + +// Converts the given color to HCL space using D65 as reference white. +// H values are in [0..360], C and L values are in [0..1] although C can overshoot 1.0 +func (col Color) Hcl() (h, c, l float64) { + return col.HclWhiteRef(D65) +} + +func LabToHcl(L, a, b float64) (h, c, l float64) { + // Oops, floating point workaround necessary if a ~= b and both are very small (i.e. almost zero). + if math.Abs(b-a) > 1e-4 && math.Abs(a) > 1e-4 { + h = math.Mod(57.29577951308232087721*math.Atan2(b, a)+360.0, 360.0) // Rad2Deg + } else { + h = 0.0 + } + c = math.Sqrt(sq(a) + sq(b)) + l = L + return +} + +// Converts the given color to HCL space, taking into account +// a given reference white. (i.e. the monitor's white) +// H values are in [0..360], C and L values are in [0..1] +func (col Color) HclWhiteRef(wref [3]float64) (h, c, l float64) { + L, a, b := col.LabWhiteRef(wref) + return LabToHcl(L, a, b) +} + +// Generates a color by using data given in HCL space using D65 as reference white. +// H values are in [0..360], C and L values are in [0..1] +// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding +// valid RGB values, check the FAQ in the README if you're unsure. 
+func Hcl(h, c, l float64) Color { + return HclWhiteRef(h, c, l, D65) +} + +func HclToLab(h, c, l float64) (L, a, b float64) { + H := 0.01745329251994329576 * h // Deg2Rad + a = c * math.Cos(H) + b = c * math.Sin(H) + L = l + return +} + +// Generates a color by using data given in HCL space, taking +// into account a given reference white. (i.e. the monitor's white) +// H values are in [0..360], C and L values are in [0..1] +func HclWhiteRef(h, c, l float64, wref [3]float64) Color { + L, a, b := HclToLab(h, c, l) + return LabWhiteRef(L, a, b, wref) +} + +// BlendHcl blends two colors in the CIE-L*C*h° color-space, which should result in a smoother blend. +// t == 0 results in c1, t == 1 results in c2 +func (col1 Color) BlendHcl(col2 Color, t float64) Color { + h1, c1, l1 := col1.Hcl() + h2, c2, l2 := col2.Hcl() + + // We know that h are both in [0..360] + return Hcl(interp_angle(h1, h2, t), c1+t*(c2-c1), l1+t*(l2-l1)) +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go new file mode 100644 index 00000000000..bb66dfa4f9f --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go @@ -0,0 +1,25 @@ +package colorful + +import ( + "math/rand" +) + +// Uses the HSV color space to generate colors with similar S,V but distributed +// evenly along their Hue. This is fast but not always pretty. +// If you've got time to spare, use Lab (the non-fast below). 
+func FastHappyPalette(colorsCount int) (colors []Color) { + colors = make([]Color, colorsCount) + + for i := 0; i < colorsCount; i++ { + colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.8+rand.Float64()*0.2, 0.65+rand.Float64()*0.2) + } + return +} + +func HappyPalette(colorsCount int) ([]Color, error) { + pimpy := func(l, a, b float64) bool { + _, c, _ := LabToHcl(l, a, b) + return 0.3 <= c && 0.4 <= l && l <= 0.8 + } + return SoftPaletteEx(colorsCount, SoftPaletteSettings{pimpy, 50, true}) +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go b/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go new file mode 100644 index 00000000000..86a5ed98714 --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go @@ -0,0 +1,37 @@ +package colorful + +import ( + "database/sql/driver" + "fmt" + "reflect" +) + +// A HexColor is a Color stored as a hex string "#rrggbb". It implements the +// database/sql.Scanner and database/sql/driver.Value interfaces. +type HexColor Color + +type errUnsupportedType struct { + got interface{} + want reflect.Type +} + +func (hc *HexColor) Scan(value interface{}) error { + s, ok := value.(string) + if !ok { + return errUnsupportedType{got: reflect.TypeOf(value), want: reflect.TypeOf("")} + } + c, err := Hex(s) + if err != nil { + return err + } + *hc = HexColor(c) + return nil +} + +func (hc *HexColor) Value() (driver.Value, error) { + return Color(*hc).Hex(), nil +} + +func (e errUnsupportedType) Error() string { + return fmt.Sprintf("unsupported type: got %v, want a %s", e.got, e.want) +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go new file mode 100644 index 00000000000..0154ac9bac8 --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go @@ -0,0 +1,185 @@ +// Largely inspired by the descriptions in http://lab.medialab.sciences-po.fr/iwanthue/ +// but written from scratch. 
+ +package colorful + +import ( + "fmt" + "math" + "math/rand" +) + +// The algorithm works in L*a*b* color space and converts to RGB in the end. +// L* in [0..1], a* and b* in [-1..1] +type lab_t struct { + L, A, B float64 +} + +type SoftPaletteSettings struct { + // A function which can be used to restrict the allowed color-space. + CheckColor func(l, a, b float64) bool + + // The higher, the better quality but the slower. Usually two figures. + Iterations int + + // Use up to 160000 or 8000 samples of the L*a*b* space (and thus calls to CheckColor). + // Set this to true only if your CheckColor shapes the Lab space weirdly. + ManySamples bool +} + +// Yeah, windows-stype Foo, FooEx, screw you golang... +// Uses K-means to cluster the color-space and return the means of the clusters +// as a new palette of distinctive colors. Falls back to K-medoid if the mean +// happens to fall outside of the color-space, which can only happen if you +// specify a CheckColor function. +func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) { + + // Checks whether it's a valid RGB and also fulfills the potentially provided constraint. + check := func(col lab_t) bool { + c := Lab(col.L, col.A, col.B) + return c.IsValid() && (settings.CheckColor == nil || settings.CheckColor(col.L, col.A, col.B)) + } + + // Sample the color space. These will be the points k-means is run on. + dl := 0.05 + dab := 0.1 + if settings.ManySamples { + dl = 0.01 + dab = 0.05 + } + + samples := make([]lab_t, 0, int(1.0/dl*2.0/dab*2.0/dab)) + for l := 0.0; l <= 1.0; l += dl { + for a := -1.0; a <= 1.0; a += dab { + for b := -1.0; b <= 1.0; b += dab { + if check(lab_t{l, a, b}) { + samples = append(samples, lab_t{l, a, b}) + } + } + } + } + + // That would cause some infinite loops down there... + if len(samples) < colorsCount { + return nil, fmt.Errorf("palettegen: more colors requested (%v) than samples available (%v). 
Your requested color count may be wrong, you might want to use many samples or your constraint function makes the valid color space too small.", colorsCount, len(samples)) + } else if len(samples) == colorsCount { + return labs2cols(samples), nil // Oops? + } + + // We take the initial means out of the samples, so they are in fact medoids. + // This helps us avoid infinite loops or arbitrary cutoffs with too restrictive constraints. + means := make([]lab_t, colorsCount) + for i := 0; i < colorsCount; i++ { + for means[i] = samples[rand.Intn(len(samples))]; in(means, i, means[i]); means[i] = samples[rand.Intn(len(samples))] { + } + } + + clusters := make([]int, len(samples)) + samples_used := make([]bool, len(samples)) + + // The actual k-means/medoid iterations + for i := 0; i < settings.Iterations; i++ { + // Reassing the samples to clusters, i.e. to their closest mean. + // By the way, also check if any sample is used as a medoid and if so, mark that. + for isample, sample := range samples { + samples_used[isample] = false + mindist := math.Inf(+1) + for imean, mean := range means { + dist := lab_dist(sample, mean) + if dist < mindist { + mindist = dist + clusters[isample] = imean + } + + // Mark samples which are used as a medoid. + if lab_eq(sample, mean) { + samples_used[isample] = true + } + } + } + + // Compute new means according to the samples. + for imean := range means { + // The new mean is the average of all samples belonging to it.. + nsamples := 0 + newmean := lab_t{0.0, 0.0, 0.0} + for isample, sample := range samples { + if clusters[isample] == imean { + nsamples++ + newmean.L += sample.L + newmean.A += sample.A + newmean.B += sample.B + } + } + if nsamples > 0 { + newmean.L /= float64(nsamples) + newmean.A /= float64(nsamples) + newmean.B /= float64(nsamples) + } else { + // That mean doesn't have any samples? Get a new mean from the sample list! 
+ var inewmean int + for inewmean = rand.Intn(len(samples_used)); samples_used[inewmean]; inewmean = rand.Intn(len(samples_used)) { + } + newmean = samples[inewmean] + samples_used[inewmean] = true + } + + // But now we still need to check whether the new mean is an allowed color. + if nsamples > 0 && check(newmean) { + // It does, life's good (TM) + means[imean] = newmean + } else { + // New mean isn't an allowed color or doesn't have any samples! + // Switch to medoid mode and pick the closest (unused) sample. + // This should always find something thanks to len(samples) >= colorsCount + mindist := math.Inf(+1) + for isample, sample := range samples { + if !samples_used[isample] { + dist := lab_dist(sample, newmean) + if dist < mindist { + mindist = dist + newmean = sample + } + } + } + } + } + } + return labs2cols(means), nil +} + +// A wrapper which uses common parameters. +func SoftPalette(colorsCount int) ([]Color, error) { + return SoftPaletteEx(colorsCount, SoftPaletteSettings{nil, 50, false}) +} + +func in(haystack []lab_t, upto int, needle lab_t) bool { + for i := 0; i < upto && i < len(haystack); i++ { + if haystack[i] == needle { + return true + } + } + return false +} + +const LAB_DELTA = 1e-6 + +func lab_eq(lab1, lab2 lab_t) bool { + return math.Abs(lab1.L-lab2.L) < LAB_DELTA && + math.Abs(lab1.A-lab2.A) < LAB_DELTA && + math.Abs(lab1.B-lab2.B) < LAB_DELTA +} + +// That's faster than using colorful's DistanceLab since we would have to +// convert back and forth for that. Here is no conversion. 
+func lab_dist(lab1, lab2 lab_t) float64 { + return math.Sqrt(sq(lab1.L-lab2.L) + sq(lab1.A-lab2.A) + sq(lab1.B-lab2.B)) +} + +func labs2cols(labs []lab_t) (cols []Color) { + cols = make([]Color, len(labs)) + for k, v := range labs { + cols[k] = Lab(v.L, v.A, v.B) + } + return cols +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go new file mode 100644 index 00000000000..00f42a5cc7c --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go @@ -0,0 +1,25 @@ +package colorful + +import ( + "math/rand" +) + +// Uses the HSV color space to generate colors with similar S,V but distributed +// evenly along their Hue. This is fast but not always pretty. +// If you've got time to spare, use Lab (the non-fast below). +func FastWarmPalette(colorsCount int) (colors []Color) { + colors = make([]Color, colorsCount) + + for i := 0; i < colorsCount; i++ { + colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.55+rand.Float64()*0.2, 0.35+rand.Float64()*0.2) + } + return +} + +func WarmPalette(colorsCount int) ([]Color, error) { + warmy := func(l, a, b float64) bool { + _, c, _ := LabToHcl(l, a, b) + return 0.1 <= c && c <= 0.4 && 0.2 <= l && l <= 0.5 + } + return SoftPaletteEx(colorsCount, SoftPaletteSettings{warmy, 50, true}) +} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 00000000000..91b5cef30eb --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to 
permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 00000000000..3cb94106f99 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,977 @@ +package runewidth + +import ( + "os" +) + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth bool + + // ZeroWidthJoiner is flag to set to use UTR#51 ZWJ + ZeroWidthJoiner bool + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{} +) + +func init() { + handleEnv() +} + +func handleEnv() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } + // update DefaultCondition + DefaultCondition.EastAsianWidth = EastAsianWidth + DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner +} + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + // func (t table) IncludesRune(r rune) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + 
top) >> 1 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, + {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, + {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, + {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, + {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, + {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, + {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827}, + {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, + {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, + {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, + {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, + {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, + {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, + {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, + {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, + {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, + {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, + {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, + {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, + {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, + {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 
0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, + {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, + {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, + {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, + {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, + {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, + {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, + {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, + {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, + {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, + {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, + {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, + {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, + {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, + {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, + {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, + {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, + {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, + {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, + {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, + {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, + {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, + {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, + {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, + {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, + {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, + {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, + {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, + {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, + {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, + {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, + {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, + {0x102E0, 0x102E0}, {0x10376, 0x1037A}, 
{0x10A01, 0x10A03}, + {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, + {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, + {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, + {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, + {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, + {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, + {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, + {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, + {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, + {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, + {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, + {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, + {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, + {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, + {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, + {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, + {0xE0100, 0xE01EF}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + 
{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA}, + {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, + {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, + {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, + {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, + {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, + {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, + {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, + {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, + {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, + {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, + {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, + {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, + {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, + {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, + {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, + {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, + {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, + {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 
0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 
0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} + +var emoji = table{ + {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, + {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x23CF, 0x23CF}, + {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, {0x24C2, 0x24C2}, + {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, {0x25C0, 0x25C0}, + {0x25FB, 0x25FE}, {0x2600, 0x2604}, {0x260E, 0x260E}, + {0x2611, 0x2611}, {0x2614, 0x2615}, {0x2618, 0x2618}, + {0x261D, 0x261D}, {0x2620, 0x2620}, {0x2622, 0x2623}, + {0x2626, 0x2626}, {0x262A, 0x262A}, {0x262E, 0x262F}, + {0x2638, 0x263A}, {0x2640, 0x2640}, {0x2642, 0x2642}, + {0x2648, 0x2653}, {0x265F, 0x2660}, {0x2663, 0x2663}, + {0x2665, 0x2666}, {0x2668, 0x2668}, {0x267B, 0x267B}, + {0x267E, 0x267F}, {0x2692, 0x2697}, {0x2699, 0x2699}, + {0x269B, 0x269C}, {0x26A0, 0x26A1}, {0x26AA, 0x26AB}, + {0x26B0, 0x26B1}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26C8, 0x26C8}, {0x26CE, 0x26CF}, {0x26D1, 0x26D1}, + {0x26D3, 0x26D4}, {0x26E9, 0x26EA}, {0x26F0, 0x26F5}, + {0x26F7, 0x26FA}, {0x26FD, 0x26FD}, {0x2702, 0x2702}, + {0x2705, 0x2705}, {0x2708, 0x270D}, {0x270F, 0x270F}, + {0x2712, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, + 
{0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, + {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2763, 0x2764}, {0x2795, 0x2797}, + {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, + {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, + {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, + {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F170, 0x1F171}, + {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F1E6, 0x1F1FF}, {0x1F201, 0x1F202}, {0x1F21A, 0x1F21A}, + {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, {0x1F250, 0x1F251}, + {0x1F300, 0x1F321}, {0x1F324, 0x1F393}, {0x1F396, 0x1F397}, + {0x1F399, 0x1F39B}, {0x1F39E, 0x1F3F0}, {0x1F3F3, 0x1F3F5}, + {0x1F3F7, 0x1F4FD}, {0x1F4FF, 0x1F53D}, {0x1F549, 0x1F54E}, + {0x1F550, 0x1F567}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F57A}, + {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, + {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, + {0x1F5B1, 0x1F5B2}, {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, + {0x1F5D1, 0x1F5D3}, {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, + {0x1F5E3, 0x1F5E3}, {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, + {0x1F5F3, 0x1F5F3}, {0x1F5FA, 0x1F64F}, {0x1F680, 0x1F6C5}, + {0x1F6CB, 0x1F6D2}, {0x1F6E0, 0x1F6E5}, {0x1F6E9, 0x1F6E9}, + {0x1F6EB, 0x1F6EC}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F9}, + {0x1F910, 0x1F93A}, {0x1F93C, 0x1F93E}, {0x1F940, 0x1F945}, + {0x1F947, 0x1F970}, {0x1F973, 0x1F976}, {0x1F97A, 0x1F97A}, + {0x1F97C, 0x1F9A2}, {0x1F9B0, 0x1F9B9}, {0x1F9C0, 0x1F9C2}, + {0x1F9D0, 0x1F9FF}, +} + +var notassigned = table{ + {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, + {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, + {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, + {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, + {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, + {0x070E, 0x070E}, {0x074B, 0x074C}, 
{0x07B2, 0x07BF}, + {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F}, + {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, + {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, + {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, + {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, + {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, + {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, + {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, + {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, + {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, + {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, + {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, + {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, + {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, + {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, + {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF}, + {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, + {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, + {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, + {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, + {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, + {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, + {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98}, + {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, + {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, + {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, + {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, + {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, + {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, + {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, + {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, + {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, + {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, + {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, + {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, + {0x0CF0, 0x0CF0}, 
{0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, + {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, + {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, + {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, + {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, + {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, + {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, + {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, + {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, + {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, + {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, + {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, + {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, + {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, + {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, + {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, + {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, + {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, + {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, + {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, + {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, + {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, + {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, + {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, + {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D}, + {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, + {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, + {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, + {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, + {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, + {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, + {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, + {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, + {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, + {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, + {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, + 
{0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, + {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, + {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, + {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, + {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, + {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, + {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, + {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, + {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, + {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, + {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, + {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, + {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, + {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, + {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, + {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, + {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, + {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, + {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, + {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, + {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, + {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, + {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, + {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, + {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, + {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, + {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, + {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, + {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, + {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, + {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, + {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, + {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, + {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, + {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, + {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, 
{0xFB07, 0xFB12}, + {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, + {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, + {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, + {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, + {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, + {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, + {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, + {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, + {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, + {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, + {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, + {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, + {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, + {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, + {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, + {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, + {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, + {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, + {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, + {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809}, + {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, + {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, + {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, + {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, + {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, + {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, + {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, + {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, + {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, + {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, + {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, + {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, + {0x11070, 0x1107E}, {0x110C2, 0x110CF}, 
{0x110E9, 0x110EF}, + {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, + {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, + {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, + {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, + {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, + {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, + {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, + {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, + {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, + {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, + {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, + {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, + {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, + {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, + {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, + {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, + {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, + {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, + {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, + {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, + {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, + {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, + {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, + {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, + {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, + {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, + {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, + {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, + {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, + {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, + {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, + {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 
0x1D4A8}, + {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, + {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, + {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, + {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, + {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, + {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, + {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, + {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, + {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, + {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, + {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, + {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, + {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, + {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, + {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, + {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, + {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, + {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, + {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, + {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, + {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, + {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, + {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, + {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, + {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, + {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, + {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, + {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, + {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, + {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, + {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, + {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F}, + 
{0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, + {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, + {0xFFFFE, 0xFFFFF}, +} + +var neutral = table{ + {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, + {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, + {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, + {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, + {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, + {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, + {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, + {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, + {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, + {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, + {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, + {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, + {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, + {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, + {0x0531, 0x0556}, {0x0559, 0x055F}, {0x0561, 0x0587}, + {0x0589, 0x058A}, {0x058D, 0x058F}, {0x0591, 0x05C7}, + {0x05D0, 0x05EA}, {0x05F0, 0x05F4}, {0x0600, 0x061C}, + {0x061E, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1}, + {0x07C0, 0x07FA}, {0x0800, 0x082D}, {0x0830, 0x083E}, + {0x0840, 0x085B}, {0x085E, 0x085E}, {0x08A0, 0x08B4}, + {0x08B6, 0x08BD}, {0x08D4, 0x0983}, {0x0985, 0x098C}, + {0x098F, 0x0990}, {0x0993, 0x09A8}, {0x09AA, 0x09B0}, + {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, + {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, + {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, {0x09E6, 0x09FB}, + {0x0A01, 0x0A03}, {0x0A05, 
0x0A0A}, {0x0A0F, 0x0A10}, + {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, + {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, + {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, + {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, + {0x0A66, 0x0A75}, {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, + {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, + {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, + {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, + {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AF9}, + {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, + {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, + {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, + {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B5C, 0x0B5D}, + {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, + {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, + {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, + {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, + {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, + {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, + {0x0C00, 0x0C03}, {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, + {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3D, 0x0C44}, + {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, + {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, {0x0C66, 0x0C6F}, + {0x0C78, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D01, 0x0D03}, + {0x0D05, 0x0D0C}, {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, + {0x0D3D, 0x0D44}, {0x0D46, 0x0D48}, {0x0D4A, 0x0D4F}, + {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, {0x0D82, 0x0D83}, + {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, + {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, + {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, + {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, {0x0E01, 0x0E3A}, + {0x0E3F, 
0x0E5B}, {0x0E81, 0x0E82}, {0x0E84, 0x0E84}, + {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, {0x0E8D, 0x0E8D}, + {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, {0x0EA1, 0x0EA3}, + {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, {0x0EAA, 0x0EAB}, + {0x0EAD, 0x0EB9}, {0x0EBB, 0x0EBD}, {0x0EC0, 0x0EC4}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, + {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, + {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, + {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, + {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, + {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, + {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, + {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, + {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, + {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, + {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, + {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, + {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, + {0x1820, 0x1877}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, + {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, + {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, + {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, + {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, + {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, + {0x1AB0, 0x1ABE}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, + {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, + {0x1C4D, 0x1C88}, {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CF6}, + {0x1CF8, 0x1CF9}, {0x1D00, 0x1DF5}, {0x1DFB, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 
0x1FFE}, + {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, + {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, + {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20F0}, + {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, + {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, + {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, + {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, + {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, + {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, + {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, + {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, + {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, + {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, + {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, + {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, + {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, + {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, + {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, + {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, {0x2400, 0x2426}, + {0x2440, 0x244A}, {0x24EA, 0x24EA}, {0x254C, 0x254F}, + {0x2574, 0x257F}, {0x2590, 0x2591}, {0x2596, 0x259F}, + {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, + {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, + {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, + {0x25E6, 0x25EE}, {0x25F0, 0x25FC}, {0x25FF, 0x2604}, + {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, + {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, + {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, + {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, + {0x266E, 0x266E}, {0x2670, 
0x267E}, {0x2680, 0x2692}, + {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, + {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, + {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, + {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, + {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, + {0x2758, 0x2775}, {0x2780, 0x2794}, {0x2798, 0x27AF}, + {0x27B1, 0x27BE}, {0x27C0, 0x27E5}, {0x27EE, 0x2984}, + {0x2987, 0x2B1A}, {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, + {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, + {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, + {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2CF3}, + {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, + {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, + {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, + {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, + {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E44}, + {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, + {0xA640, 0xA6F7}, {0xA700, 0xA7AE}, {0xA7B0, 0xA7B7}, + {0xA7F7, 0xA82B}, {0xA830, 0xA839}, {0xA840, 0xA877}, + {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, {0xA8E0, 0xA8FD}, + {0xA900, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, + {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, + {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, + {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB65}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, + {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, + {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, + {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, + {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 
0x100FA}, + {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, + {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, + {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, + {0x10300, 0x10323}, {0x10330, 0x1034A}, {0x10350, 0x1037A}, + {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, + {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A47}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x1104D}, + {0x11052, 0x1106F}, {0x1107F, 0x110C1}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11143}, + {0x11150, 0x11176}, {0x11180, 0x111CD}, {0x111D0, 0x111DF}, + {0x111E1, 0x111F4}, {0x11200, 0x11211}, {0x11213, 0x1123E}, + {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, + {0x1128F, 0x1129D}, {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, + {0x112F0, 0x112F9}, {0x11300, 0x11303}, {0x11305, 0x1130C}, + {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330}, + {0x11332, 0x11333}, {0x11335, 0x11339}, {0x1133C, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, + {0x11357, 0x11357}, {0x1135D, 0x11363}, {0x11366, 0x1136C}, + 
{0x11370, 0x11374}, {0x11400, 0x11459}, {0x1145B, 0x1145B}, + {0x1145D, 0x1145D}, {0x11480, 0x114C7}, {0x114D0, 0x114D9}, + {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644}, + {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B7}, + {0x116C0, 0x116C9}, {0x11700, 0x11719}, {0x1171D, 0x1172B}, + {0x11730, 0x1173F}, {0x118A0, 0x118F2}, {0x118FF, 0x118FF}, + {0x11AC0, 0x11AF8}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, + {0x11C38, 0x11C45}, {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, + {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x12000, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, + {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, + {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, + {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, + {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, {0x16F50, 0x16F7E}, + {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, + {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, + {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, + {0x1D200, 0x1D245}, {0x1D300, 0x1D356}, {0x1D360, 0x1D371}, + {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, + {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, + {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, + {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, + {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, + {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, + {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, + {0x1E900, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, + {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, + {0x1EE34, 
0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, + {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, + {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, + {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, + {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, + {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, + {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, + {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, + {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, + {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, + {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, + {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, + {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA}, + {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4}, + {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, + {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001}, + {0xE0020, 0xE007F}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool + ZeroWidthJoiner bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{ + EastAsianWidth: EastAsianWidth, + ZeroWidthJoiner: ZeroWidthJoiner, + } +} + +// RuneWidth returns the number of cells in r. 
+// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + switch { + case r < 0 || r > 0x10FFFF || + inTables(r, nonprint, combining, notassigned): + return 0 + case (c.EastAsianWidth && IsAmbiguousWidth(r)) || + inTables(r, doublewidth, emoji): + return 2 + default: + return 1 + } +} + +func (c *Condition) stringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +func (c *Condition) stringWidthZeroJoiner(s string) (width int) { + r1, r2 := rune(0), rune(0) + for _, r := range []rune(s) { + if r == 0xFE0E || r == 0xFE0F { + continue + } + w := c.RuneWidth(r) + if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) { + w = 0 + } + width += w + r1, r2 = r2, r + } + return width +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + if c.ZeroWidthJoiner { + return c.stringWidthZeroJoiner(s) + } + return c.stringWidth(s) +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' 
' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return inTables(r, private, ambiguous) +} + +// IsNeutralWidth returns whether is neutral width or not. +func IsNeutralWidth(r rune) bool { + return inTable(r, neutral) +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go new file mode 100644 index 00000000000..7d99f6e5210 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go @@ -0,0 +1,8 @@ +// +build appengine + +package runewidth + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go 
b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 00000000000..c5fdf40baa0 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,9 @@ +// +build js +// +build !appengine + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 00000000000..66a58b5d873 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,79 @@ +// +build !windows +// +build !js +// +build !appengine + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_CTYPE") + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] 
== 'C' && (locale[1] == '.' || locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 00000000000..d6a61777d7b --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,28 @@ +// +build windows +// +build !appengine + +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000000..a3866a291fd --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000000..204afb42005 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 00000000000..31b42cadf8d --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. 
At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) + t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 00000000000..832d47dd169 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. 
+type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. + errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. 
+ return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/rivo/tview/LICENSE.txt b/vendor/github.com/rivo/tview/LICENSE.txt new file mode 100644 index 00000000000..9d6943073c5 --- /dev/null +++ b/vendor/github.com/rivo/tview/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Oliver Kuederle + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rivo/tview/ansi.go b/vendor/github.com/rivo/tview/ansi.go new file mode 100644 index 00000000000..4d14c282702 --- /dev/null +++ b/vendor/github.com/rivo/tview/ansi.go @@ -0,0 +1,237 @@ +package tview + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// The states of the ANSI escape code parser. +const ( + ansiText = iota + ansiEscape + ansiSubstring + ansiControlSequence +) + +// ansi is a io.Writer which translates ANSI escape codes into tview color +// tags. +type ansi struct { + io.Writer + + // Reusable buffers. 
+ buffer *bytes.Buffer // The entire output text of one Write(). + csiParameter, csiIntermediate *bytes.Buffer // Partial CSI strings. + + // The current state of the parser. One of the ansi constants. + state int +} + +// ANSIWriter returns an io.Writer which translates any ANSI escape codes +// written to it into tview color tags. Other escape codes don't have an effect +// and are simply removed. The translated text is written to the provided +// writer. +func ANSIWriter(writer io.Writer) io.Writer { + return &ansi{ + Writer: writer, + buffer: new(bytes.Buffer), + csiParameter: new(bytes.Buffer), + csiIntermediate: new(bytes.Buffer), + state: ansiText, + } +} + +// Write parses the given text as a string of runes, translates ANSI escape +// codes to color tags and writes them to the output writer. +func (a *ansi) Write(text []byte) (int, error) { + defer func() { + a.buffer.Reset() + }() + + for _, r := range string(text) { + switch a.state { + + // We just entered an escape sequence. + case ansiEscape: + switch r { + case '[': // Control Sequence Introducer. + a.csiParameter.Reset() + a.csiIntermediate.Reset() + a.state = ansiControlSequence + case 'c': // Reset. + fmt.Fprint(a.buffer, "[-:-:-]") + a.state = ansiText + case 'P', ']', 'X', '^', '_': // Substrings and commands. + a.state = ansiSubstring + default: // Ignore. + a.state = ansiText + } + + // CSI Sequences. + case ansiControlSequence: + switch { + case r >= 0x30 && r <= 0x3f: // Parameter bytes. + if _, err := a.csiParameter.WriteRune(r); err != nil { + return 0, err + } + case r >= 0x20 && r <= 0x2f: // Intermediate bytes. + if _, err := a.csiIntermediate.WriteRune(r); err != nil { + return 0, err + } + case r >= 0x40 && r <= 0x7e: // Final byte. + switch r { + case 'E': // Next line. + count, _ := strconv.Atoi(a.csiParameter.String()) + if count == 0 { + count = 1 + } + fmt.Fprint(a.buffer, strings.Repeat("\n", count)) + case 'm': // Select Graphic Rendition. 
+ var ( + background, foreground, attributes string + clearAttributes bool + ) + fields := strings.Split(a.csiParameter.String(), ";") + if len(fields) == 0 || len(fields) == 1 && fields[0] == "0" { + // Reset. + if _, err := a.buffer.WriteString("[-:-:-]"); err != nil { + return 0, err + } + break + } + lookupColor := func(colorNumber int, bright bool) string { + if colorNumber < 0 || colorNumber > 7 { + return "black" + } + if bright { + colorNumber += 8 + } + return [...]string{ + "black", + "red", + "green", + "yellow", + "blue", + "darkmagenta", + "darkcyan", + "white", + "#7f7f7f", + "#ff0000", + "#00ff00", + "#ffff00", + "#5c5cff", + "#ff00ff", + "#00ffff", + "#ffffff", + }[colorNumber] + } + for index, field := range fields { + switch field { + case "1", "01": + attributes += "b" + case "2", "02": + attributes += "d" + case "4", "04": + attributes += "u" + case "5", "05": + attributes += "l" + case "7", "07": + attributes += "7" + case "22", "24", "25", "27": + clearAttributes = true + case "30", "31", "32", "33", "34", "35", "36", "37": + colorNumber, _ := strconv.Atoi(field) + foreground = lookupColor(colorNumber-30, false) + case "40", "41", "42", "43", "44", "45", "46", "47": + colorNumber, _ := strconv.Atoi(field) + background = lookupColor(colorNumber-40, false) + case "90", "91", "92", "93", "94", "95", "96", "97": + colorNumber, _ := strconv.Atoi(field) + foreground = lookupColor(colorNumber-90, true) + case "100", "101", "102", "103", "104", "105", "106", "107": + colorNumber, _ := strconv.Atoi(field) + background = lookupColor(colorNumber-100, true) + case "38", "48": + var color string + if len(fields) > index+1 { + if fields[index+1] == "5" && len(fields) > index+2 { // 8-bit colors. 
+ colorNumber, _ := strconv.Atoi(fields[index+2]) + if colorNumber <= 7 { + color = lookupColor(colorNumber, false) + } else if colorNumber <= 15 { + color = lookupColor(colorNumber, true) + } else if colorNumber <= 231 { + red := (colorNumber - 16) / 36 + green := ((colorNumber - 16) / 6) % 6 + blue := (colorNumber - 16) % 6 + color = fmt.Sprintf("#%02x%02x%02x", 255*red/5, 255*green/5, 255*blue/5) + } else if colorNumber <= 255 { + grey := 255 * (colorNumber - 232) / 23 + color = fmt.Sprintf("#%02x%02x%02x", grey, grey, grey) + } + } else if fields[index+1] == "2" && len(fields) > index+4 { // 24-bit colors. + red, _ := strconv.Atoi(fields[index+2]) + green, _ := strconv.Atoi(fields[index+3]) + blue, _ := strconv.Atoi(fields[index+4]) + color = fmt.Sprintf("#%02x%02x%02x", red, green, blue) + } + } + if len(color) > 0 { + if field == "38" { + foreground = color + } else { + background = color + } + } + } + } + if len(attributes) > 0 || clearAttributes { + attributes = ":" + attributes + } + if len(foreground) > 0 || len(background) > 0 || len(attributes) > 0 { + fmt.Fprintf(a.buffer, "[%s:%s%s]", foreground, background, attributes) + } + } + a.state = ansiText + default: // Undefined byte. + a.state = ansiText // Abort CSI. + } + + // We just entered a substring/command sequence. + case ansiSubstring: + if r == 27 { // Most likely the end of the substring. + a.state = ansiEscape + } // Ignore all other characters. + + // "ansiText" and all others. + default: + if r == 27 { + // This is the start of an escape sequence. + a.state = ansiEscape + } else { + // Just a regular rune. Send to buffer. + if _, err := a.buffer.WriteRune(r); err != nil { + return 0, err + } + } + } + } + + // Write buffer to target writer. + n, err := a.buffer.WriteTo(a.Writer) + if err != nil { + return int(n), err + } + return len(text), nil +} + +// TranslateANSI replaces ANSI escape sequences found in the provided string +// with tview's color tags and returns the resulting string. 
+func TranslateANSI(text string) string { + var buffer bytes.Buffer + writer := ANSIWriter(&buffer) + writer.Write([]byte(text)) + return buffer.String() +} diff --git a/vendor/github.com/rivo/tview/application.go b/vendor/github.com/rivo/tview/application.go new file mode 100644 index 00000000000..03d5f0d94db --- /dev/null +++ b/vendor/github.com/rivo/tview/application.go @@ -0,0 +1,505 @@ +package tview + +import ( + "sync" + + "github.com/gdamore/tcell" +) + +// The size of the event/update/redraw channels. +const queueSize = 100 + +// Application represents the top node of an application. +// +// It is not strictly required to use this class as none of the other classes +// depend on it. However, it provides useful tools to set up an application and +// plays nicely with all widgets. +// +// The following command displays a primitive p on the screen until Ctrl-C is +// pressed: +// +// if err := tview.NewApplication().SetRoot(p, true).Run(); err != nil { +// panic(err) +// } +type Application struct { + sync.RWMutex + + // The application's screen. Apart from Run(), this variable should never be + // set directly. Always use the screenReplacement channel after calling + // Fini(), to set a new screen (or nil to stop the application). + screen tcell.Screen + + // The primitive which currently has the keyboard focus. + focus Primitive + + // The root primitive to be seen on the screen. + root Primitive + + // Whether or not the application resizes the root primitive. + rootFullscreen bool + + // An optional capture function which receives a key event and returns the + // event to be forwarded to the default input handler (nil if nothing should + // be forwarded). + inputCapture func(event *tcell.EventKey) *tcell.EventKey + + // An optional callback function which is invoked just before the root + // primitive is drawn. + beforeDraw func(screen tcell.Screen) bool + + // An optional callback function which is invoked after the root primitive + // was drawn. 
+ afterDraw func(screen tcell.Screen) + + // Used to send screen events from separate goroutine to main event loop + events chan tcell.Event + + // Functions queued from goroutines, used to serialize updates to primitives. + updates chan func() + + // An object that the screen variable will be set to after Fini() was called. + // Use this channel to set a new screen object for the application + // (screen.Init() and draw() will be called implicitly). A value of nil will + // stop the application. + screenReplacement chan tcell.Screen +} + +// NewApplication creates and returns a new application. +func NewApplication() *Application { + return &Application{ + events: make(chan tcell.Event, queueSize), + updates: make(chan func(), queueSize), + screenReplacement: make(chan tcell.Screen, 1), + } +} + +// SetInputCapture sets a function which captures all key events before they are +// forwarded to the key event handler of the primitive which currently has +// focus. This function can then choose to forward that key event (or a +// different one) by returning it or stop the key event processing by returning +// nil. +// +// Note that this also affects the default event handling of the application +// itself: Such a handler can intercept the Ctrl-C event which closes the +// applicatoon. +func (a *Application) SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *Application { + a.inputCapture = capture + return a +} + +// GetInputCapture returns the function installed with SetInputCapture() or nil +// if no such function has been installed. +func (a *Application) GetInputCapture() func(event *tcell.EventKey) *tcell.EventKey { + return a.inputCapture +} + +// SetScreen allows you to provide your own tcell.Screen object. For most +// applications, this is not needed and you should be familiar with +// tcell.Screen when using this function. +// +// This function is typically called before the first call to Run(). Init() need +// not be called on the screen. 
+func (a *Application) SetScreen(screen tcell.Screen) *Application { + if screen == nil { + return a // Invalid input. Do nothing. + } + + a.Lock() + if a.screen == nil { + // Run() has not been called yet. + a.screen = screen + a.Unlock() + return a + } + + // Run() is already in progress. Exchange screen. + oldScreen := a.screen + a.Unlock() + oldScreen.Fini() + a.screenReplacement <- screen + + return a +} + +// Run starts the application and thus the event loop. This function returns +// when Stop() was called. +func (a *Application) Run() error { + var err error + a.Lock() + + // Make a screen if there is none yet. + if a.screen == nil { + a.screen, err = tcell.NewScreen() + if err != nil { + a.Unlock() + return err + } + if err = a.screen.Init(); err != nil { + a.Unlock() + return err + } + } + + // We catch panics to clean up because they mess up the terminal. + defer func() { + if p := recover(); p != nil { + if a.screen != nil { + a.screen.Fini() + } + panic(p) + } + }() + + // Draw the screen for the first time. + a.Unlock() + a.draw() + + // Separate loop to wait for screen events. + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + a.RLock() + screen := a.screen + a.RUnlock() + if screen == nil { + // We have no screen. Let's stop. + a.QueueEvent(nil) + break + } + + // Wait for next event and queue it. + event := screen.PollEvent() + if event != nil { + // Regular event. Queue. + a.QueueEvent(event) + continue + } + + // A screen was finalized (event is nil). Wait for a new scren. + screen = <-a.screenReplacement + if screen == nil { + // No new screen. We're done. + a.QueueEvent(nil) + return + } + + // We have a new screen. Keep going. + a.Lock() + a.screen = screen + a.Unlock() + + // Initialize and draw this screen. + if err := screen.Init(); err != nil { + panic(err) + } + a.draw() + } + }() + + // Start event loop. 
+EventLoop: + for { + select { + case event := <-a.events: + if event == nil { + break EventLoop + } + + switch event := event.(type) { + case *tcell.EventKey: + a.RLock() + p := a.focus + inputCapture := a.inputCapture + a.RUnlock() + + // Intercept keys. + if inputCapture != nil { + event = inputCapture(event) + if event == nil { + a.draw() + continue // Don't forward event. + } + } + + // Ctrl-C closes the application. + if event.Key() == tcell.KeyCtrlC { + a.Stop() + } + + // Pass other key events to the currently focused primitive. + if p != nil { + if handler := p.InputHandler(); handler != nil { + handler(event, func(p Primitive) { + a.SetFocus(p) + }) + a.draw() + } + } + case *tcell.EventResize: + a.RLock() + screen := a.screen + a.RUnlock() + if screen == nil { + continue + } + screen.Clear() + a.draw() + } + + // If we have updates, now is the time to execute them. + case updater := <-a.updates: + updater() + } + } + + // Wait for the event loop to finish. + wg.Wait() + a.screen = nil + + return nil +} + +// Stop stops the application, causing Run() to return. +func (a *Application) Stop() { + a.Lock() + defer a.Unlock() + screen := a.screen + if screen == nil { + return + } + a.screen = nil + screen.Fini() + a.screenReplacement <- nil +} + +// Suspend temporarily suspends the application by exiting terminal UI mode and +// invoking the provided function "f". When "f" returns, terminal UI mode is +// entered again and the application resumes. +// +// A return value of true indicates that the application was suspended and "f" +// was called. If false is returned, the application was already suspended, +// terminal UI mode was not exited, and "f" was not called. +func (a *Application) Suspend(f func()) bool { + a.RLock() + screen := a.screen + a.RUnlock() + if screen == nil { + return false // Screen has not yet been initialized. + } + + // Enter suspended mode. + screen.Fini() + + // Wait for "f" to return. + f() + + // Make a new screen. 
+ var err error + screen, err = tcell.NewScreen() + if err != nil { + panic(err) + } + a.screenReplacement <- screen + // One key event will get lost, see https://github.com/gdamore/tcell/issues/194 + + // Continue application loop. + return true +} + +// Draw refreshes the screen (during the next update cycle). It calls the Draw() +// function of the application's root primitive and then syncs the screen +// buffer. +func (a *Application) Draw() *Application { + a.QueueUpdate(func() { + a.draw() + }) + return a +} + +// ForceDraw refreshes the screen immediately. Use this function with caution as +// it may lead to race conditions with updates to primitives in other +// goroutines. It is always preferrable to use Draw() instead. Never call this +// function from a goroutine. +// +// It is safe to call this function during queued updates and direct event +// handling. +func (a *Application) ForceDraw() *Application { + return a.draw() +} + +// draw actually does what Draw() promises to do. +func (a *Application) draw() *Application { + a.Lock() + defer a.Unlock() + + screen := a.screen + root := a.root + fullscreen := a.rootFullscreen + before := a.beforeDraw + after := a.afterDraw + + // Maybe we're not ready yet or not anymore. + if screen == nil || root == nil { + return a + } + + // Resize if requested. + if fullscreen && root != nil { + width, height := screen.Size() + root.SetRect(0, 0, width, height) + } + + // Call before handler if there is one. + if before != nil { + if before(screen) { + screen.Show() + return a + } + } + + // Draw all primitives. + root.Draw(screen) + + // Call after handler if there is one. + if after != nil { + after(screen) + } + + // Sync screen. + screen.Show() + + return a +} + +// SetBeforeDrawFunc installs a callback function which is invoked just before +// the root primitive is drawn during screen updates. If the function returns +// true, drawing will not continue, i.e. 
the root primitive will not be drawn +// (and an after-draw-handler will not be called). +// +// Note that the screen is not cleared by the application. To clear the screen, +// you may call screen.Clear(). +// +// Provide nil to uninstall the callback function. +func (a *Application) SetBeforeDrawFunc(handler func(screen tcell.Screen) bool) *Application { + a.beforeDraw = handler + return a +} + +// GetBeforeDrawFunc returns the callback function installed with +// SetBeforeDrawFunc() or nil if none has been installed. +func (a *Application) GetBeforeDrawFunc() func(screen tcell.Screen) bool { + return a.beforeDraw +} + +// SetAfterDrawFunc installs a callback function which is invoked after the root +// primitive was drawn during screen updates. +// +// Provide nil to uninstall the callback function. +func (a *Application) SetAfterDrawFunc(handler func(screen tcell.Screen)) *Application { + a.afterDraw = handler + return a +} + +// GetAfterDrawFunc returns the callback function installed with +// SetAfterDrawFunc() or nil if none has been installed. +func (a *Application) GetAfterDrawFunc() func(screen tcell.Screen) { + return a.afterDraw +} + +// SetRoot sets the root primitive for this application. If "fullscreen" is set +// to true, the root primitive's position will be changed to fill the screen. +// +// This function must be called at least once or nothing will be displayed when +// the application starts. +// +// It also calls SetFocus() on the primitive. +func (a *Application) SetRoot(root Primitive, fullscreen bool) *Application { + a.Lock() + a.root = root + a.rootFullscreen = fullscreen + if a.screen != nil { + a.screen.Clear() + } + a.Unlock() + + a.SetFocus(root) + + return a +} + +// ResizeToFullScreen resizes the given primitive such that it fills the entire +// screen. 
+func (a *Application) ResizeToFullScreen(p Primitive) *Application { + a.RLock() + width, height := a.screen.Size() + a.RUnlock() + p.SetRect(0, 0, width, height) + return a +} + +// SetFocus sets the focus on a new primitive. All key events will be redirected +// to that primitive. Callers must ensure that the primitive will handle key +// events. +// +// Blur() will be called on the previously focused primitive. Focus() will be +// called on the new primitive. +func (a *Application) SetFocus(p Primitive) *Application { + a.Lock() + if a.focus != nil { + a.focus.Blur() + } + a.focus = p + if a.screen != nil { + a.screen.HideCursor() + } + a.Unlock() + if p != nil { + p.Focus(func(p Primitive) { + a.SetFocus(p) + }) + } + + return a +} + +// GetFocus returns the primitive which has the current focus. If none has it, +// nil is returned. +func (a *Application) GetFocus() Primitive { + a.RLock() + defer a.RUnlock() + return a.focus +} + +// QueueUpdate is used to synchronize access to primitives from non-main +// goroutines. The provided function will be executed as part of the event loop +// and thus will not cause race conditions with other such update functions or +// the Draw() function. +// +// Note that Draw() is not implicitly called after the execution of f as that +// may not be desirable. You can call Draw() from f if the screen should be +// refreshed after each update. Alternatively, use QueueUpdateDraw() to follow +// up with an immediate refresh of the screen. +func (a *Application) QueueUpdate(f func()) *Application { + a.updates <- f + return a +} + +// QueueUpdateDraw works like QueueUpdate() except it refreshes the screen +// immediately after executing f. +func (a *Application) QueueUpdateDraw(f func()) *Application { + a.QueueUpdate(func() { + f() + a.draw() + }) + return a +} + +// QueueEvent sends an event to the Application event loop. +// +// It is not recommended for event to be nil. 
+func (a *Application) QueueEvent(event tcell.Event) *Application { + a.events <- event + return a +} diff --git a/vendor/github.com/rivo/tview/borders.go b/vendor/github.com/rivo/tview/borders.go new file mode 100644 index 00000000000..946c8783464 --- /dev/null +++ b/vendor/github.com/rivo/tview/borders.go @@ -0,0 +1,45 @@ +package tview + +// Borders defines various borders used when primitives are drawn. +// These may be changed to accommodate a different look and feel. +var Borders = struct { + Horizontal rune + Vertical rune + TopLeft rune + TopRight rune + BottomLeft rune + BottomRight rune + + LeftT rune + RightT rune + TopT rune + BottomT rune + Cross rune + + HorizontalFocus rune + VerticalFocus rune + TopLeftFocus rune + TopRightFocus rune + BottomLeftFocus rune + BottomRightFocus rune +}{ + Horizontal: BoxDrawingsLightHorizontal, + Vertical: BoxDrawingsLightVertical, + TopLeft: BoxDrawingsLightDownAndRight, + TopRight: BoxDrawingsLightDownAndLeft, + BottomLeft: BoxDrawingsLightUpAndRight, + BottomRight: BoxDrawingsLightUpAndLeft, + + LeftT: BoxDrawingsLightVerticalAndRight, + RightT: BoxDrawingsLightVerticalAndLeft, + TopT: BoxDrawingsLightDownAndHorizontal, + BottomT: BoxDrawingsLightUpAndHorizontal, + Cross: BoxDrawingsLightVerticalAndHorizontal, + + HorizontalFocus: BoxDrawingsDoubleHorizontal, + VerticalFocus: BoxDrawingsDoubleVertical, + TopLeftFocus: BoxDrawingsDoubleDownAndRight, + TopRightFocus: BoxDrawingsDoubleDownAndLeft, + BottomLeftFocus: BoxDrawingsDoubleUpAndRight, + BottomRightFocus: BoxDrawingsDoubleUpAndLeft, +} diff --git a/vendor/github.com/rivo/tview/box.go b/vendor/github.com/rivo/tview/box.go new file mode 100644 index 00000000000..6bcd6d2a0a5 --- /dev/null +++ b/vendor/github.com/rivo/tview/box.go @@ -0,0 +1,341 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Box implements Primitive with a background and optional elements such as a +// border and a title. 
Most subclasses keep their content contained in the box +// but don't necessarily have to. +// +// Note that all classes which subclass from Box will also have access to its +// functions. +// +// See https://github.com/rivo/tview/wiki/Box for an example. +type Box struct { + // The position of the rect. + x, y, width, height int + + // The inner rect reserved for the box's content. + innerX, innerY, innerWidth, innerHeight int + + // Border padding. + paddingTop, paddingBottom, paddingLeft, paddingRight int + + // The box's background color. + backgroundColor tcell.Color + + // Whether or not a border is drawn, reducing the box's space for content by + // two in width and height. + border bool + + // The color of the border. + borderColor tcell.Color + + // The style attributes of the border. + borderAttributes tcell.AttrMask + + // The title. Only visible if there is a border, too. + title string + + // The color of the title. + titleColor tcell.Color + + // The alignment of the title. + titleAlign int + + // Provides a way to find out if this box has focus. We always go through + // this interface because it may be overridden by implementing classes. + focus Focusable + + // Whether or not this box has focus. + hasFocus bool + + // An optional capture function which receives a key event and returns the + // event to be forwarded to the primitive's default input handler (nil if + // nothing should be forwarded). + inputCapture func(event *tcell.EventKey) *tcell.EventKey + + // An optional function which is called before the box is drawn. + draw func(screen tcell.Screen, x, y, width, height int) (int, int, int, int) +} + +// NewBox returns a Box without a border. +func NewBox() *Box { + b := &Box{ + width: 15, + height: 10, + innerX: -1, // Mark as uninitialized. 
+ backgroundColor: Styles.PrimitiveBackgroundColor, + borderColor: Styles.BorderColor, + titleColor: Styles.TitleColor, + titleAlign: AlignCenter, + } + b.focus = b + return b +} + +// SetBorderPadding sets the size of the borders around the box content. +func (b *Box) SetBorderPadding(top, bottom, left, right int) *Box { + b.paddingTop, b.paddingBottom, b.paddingLeft, b.paddingRight = top, bottom, left, right + return b +} + +// GetRect returns the current position of the rectangle, x, y, width, and +// height. +func (b *Box) GetRect() (int, int, int, int) { + return b.x, b.y, b.width, b.height +} + +// GetInnerRect returns the position of the inner rectangle (x, y, width, +// height), without the border and without any padding. +func (b *Box) GetInnerRect() (int, int, int, int) { + if b.innerX >= 0 { + return b.innerX, b.innerY, b.innerWidth, b.innerHeight + } + x, y, width, height := b.GetRect() + if b.border { + x++ + y++ + width -= 2 + height -= 2 + } + return x + b.paddingLeft, + y + b.paddingTop, + width - b.paddingLeft - b.paddingRight, + height - b.paddingTop - b.paddingBottom +} + +// SetRect sets a new position of the primitive. Note that this has no effect +// if this primitive is part of a layout (e.g. Flex, Grid) or if it was added +// like this: +// +// application.SetRoot(b, true) +func (b *Box) SetRect(x, y, width, height int) { + b.x = x + b.y = y + b.width = width + b.height = height + b.innerX = -1 // Mark inner rect as uninitialized. +} + +// SetDrawFunc sets a callback function which is invoked after the box primitive +// has been drawn. This allows you to add a more individual style to the box +// (and all primitives which extend it). +// +// The function is provided with the box's dimensions (set via SetRect()). It +// must return the box's inner dimensions (x, y, width, height) which will be +// returned by GetInnerRect(), used by descendent primitives to draw their own +// content. 
+func (b *Box) SetDrawFunc(handler func(screen tcell.Screen, x, y, width, height int) (int, int, int, int)) *Box { + b.draw = handler + return b +} + +// GetDrawFunc returns the callback function which was installed with +// SetDrawFunc() or nil if no such function has been installed. +func (b *Box) GetDrawFunc() func(screen tcell.Screen, x, y, width, height int) (int, int, int, int) { + return b.draw +} + +// WrapInputHandler wraps an input handler (see InputHandler()) with the +// functionality to capture input (see SetInputCapture()) before passing it +// on to the provided (default) input handler. +// +// This is only meant to be used by subclassing primitives. +func (b *Box) WrapInputHandler(inputHandler func(*tcell.EventKey, func(p Primitive))) func(*tcell.EventKey, func(p Primitive)) { + return func(event *tcell.EventKey, setFocus func(p Primitive)) { + if b.inputCapture != nil { + event = b.inputCapture(event) + } + if event != nil && inputHandler != nil { + inputHandler(event, setFocus) + } + } +} + +// InputHandler returns nil. +func (b *Box) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return b.WrapInputHandler(nil) +} + +// SetInputCapture installs a function which captures key events before they are +// forwarded to the primitive's default key event handler. This function can +// then choose to forward that key event (or a different one) to the default +// handler by returning it. If nil is returned, the default handler will not +// be called. +// +// Providing a nil handler will remove a previously existing handler. +// +// Note that this function will not have an effect on primitives composed of +// other primitives, such as Form, Flex, or Grid. Key events are only captured +// by the primitives that have focus (e.g. InputField) and only one primitive +// can have focus at a time. Composing primitives such as Form pass the focus on +// to their contained primitives and thus never receive any key events +// themselves. 
Therefore, they cannot intercept key events. +func (b *Box) SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *Box { + b.inputCapture = capture + return b +} + +// GetInputCapture returns the function installed with SetInputCapture() or nil +// if no such function has been installed. +func (b *Box) GetInputCapture() func(event *tcell.EventKey) *tcell.EventKey { + return b.inputCapture +} + +// SetBackgroundColor sets the box's background color. +func (b *Box) SetBackgroundColor(color tcell.Color) *Box { + b.backgroundColor = color + return b +} + +// SetBorder sets the flag indicating whether or not the box should have a +// border. +func (b *Box) SetBorder(show bool) *Box { + b.border = show + return b +} + +// SetBorderColor sets the box's border color. +func (b *Box) SetBorderColor(color tcell.Color) *Box { + b.borderColor = color + return b +} + +// SetBorderAttributes sets the border's style attributes. You can combine +// different attributes using bitmask operations: +// +// box.SetBorderAttributes(tcell.AttrUnderline | tcell.AttrBold) +func (b *Box) SetBorderAttributes(attr tcell.AttrMask) *Box { + b.borderAttributes = attr + return b +} + +// SetTitle sets the box's title. +func (b *Box) SetTitle(title string) *Box { + b.title = title + return b +} + +// SetTitleColor sets the box's title color. +func (b *Box) SetTitleColor(color tcell.Color) *Box { + b.titleColor = color + return b +} + +// SetTitleAlign sets the alignment of the title, one of AlignLeft, AlignCenter, +// or AlignRight. +func (b *Box) SetTitleAlign(align int) *Box { + b.titleAlign = align + return b +} + +// Draw draws this primitive onto the screen. +func (b *Box) Draw(screen tcell.Screen) { + // Don't draw anything if there is no space. + if b.width <= 0 || b.height <= 0 { + return + } + + def := tcell.StyleDefault + + // Fill background. 
+ background := def.Background(b.backgroundColor) + if b.backgroundColor != tcell.ColorDefault { + for y := b.y; y < b.y+b.height; y++ { + for x := b.x; x < b.x+b.width; x++ { + screen.SetContent(x, y, ' ', nil, background) + } + } + } + + // Draw border. + if b.border && b.width >= 2 && b.height >= 2 { + border := background.Foreground(b.borderColor) | tcell.Style(b.borderAttributes) + var vertical, horizontal, topLeft, topRight, bottomLeft, bottomRight rune + if b.focus.HasFocus() { + horizontal = Borders.HorizontalFocus + vertical = Borders.VerticalFocus + topLeft = Borders.TopLeftFocus + topRight = Borders.TopRightFocus + bottomLeft = Borders.BottomLeftFocus + bottomRight = Borders.BottomRightFocus + } else { + horizontal = Borders.Horizontal + vertical = Borders.Vertical + topLeft = Borders.TopLeft + topRight = Borders.TopRight + bottomLeft = Borders.BottomLeft + bottomRight = Borders.BottomRight + } + for x := b.x + 1; x < b.x+b.width-1; x++ { + screen.SetContent(x, b.y, horizontal, nil, border) + screen.SetContent(x, b.y+b.height-1, horizontal, nil, border) + } + for y := b.y + 1; y < b.y+b.height-1; y++ { + screen.SetContent(b.x, y, vertical, nil, border) + screen.SetContent(b.x+b.width-1, y, vertical, nil, border) + } + screen.SetContent(b.x, b.y, topLeft, nil, border) + screen.SetContent(b.x+b.width-1, b.y, topRight, nil, border) + screen.SetContent(b.x, b.y+b.height-1, bottomLeft, nil, border) + screen.SetContent(b.x+b.width-1, b.y+b.height-1, bottomRight, nil, border) + + // Draw title. + if b.title != "" && b.width >= 4 { + printed, _ := Print(screen, b.title, b.x+1, b.y, b.width-2, b.titleAlign, b.titleColor) + if len(b.title)-printed > 0 && printed > 0 { + _, _, style, _ := screen.GetContent(b.x+b.width-2, b.y) + fg, _, _ := style.Decompose() + Print(screen, string(SemigraphicsHorizontalEllipsis), b.x+b.width-2, b.y, 1, AlignLeft, fg) + } + } + } + + // Call custom draw function. 
+ if b.draw != nil { + b.innerX, b.innerY, b.innerWidth, b.innerHeight = b.draw(screen, b.x, b.y, b.width, b.height) + } else { + // Remember the inner rect. + b.innerX = -1 + b.innerX, b.innerY, b.innerWidth, b.innerHeight = b.GetInnerRect() + } + + // Clamp inner rect to screen. + width, height := screen.Size() + if b.innerX < 0 { + b.innerWidth += b.innerX + b.innerX = 0 + } + if b.innerX+b.innerWidth >= width { + b.innerWidth = width - b.innerX + } + if b.innerY+b.innerHeight >= height { + b.innerHeight = height - b.innerY + } + if b.innerY < 0 { + b.innerHeight += b.innerY + b.innerY = 0 + } +} + +// Focus is called when this primitive receives focus. +func (b *Box) Focus(delegate func(p Primitive)) { + b.hasFocus = true +} + +// Blur is called when this primitive loses focus. +func (b *Box) Blur() { + b.hasFocus = false +} + +// HasFocus returns whether or not this primitive has focus. +func (b *Box) HasFocus() bool { + return b.hasFocus +} + +// GetFocusable returns the item's Focusable. +func (b *Box) GetFocusable() Focusable { + return b.focus +} diff --git a/vendor/github.com/rivo/tview/button.go b/vendor/github.com/rivo/tview/button.go new file mode 100644 index 00000000000..5efc31aee7f --- /dev/null +++ b/vendor/github.com/rivo/tview/button.go @@ -0,0 +1,137 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Button is labeled box that triggers an action when selected. +// +// See https://github.com/rivo/tview/wiki/Button for an example. +type Button struct { + *Box + + // The text to be displayed before the input area. + label string + + // The label color. + labelColor tcell.Color + + // The label color when the button is in focus. + labelColorActivated tcell.Color + + // The background color when the button is in focus. + backgroundColorActivated tcell.Color + + // An optional function which is called when the button was selected. + selected func() + + // An optional function which is called when the user leaves the button. 
A + // key is provided indicating which key was pressed to leave (tab or backtab). + blur func(tcell.Key) +} + +// NewButton returns a new input field. +func NewButton(label string) *Button { + box := NewBox().SetBackgroundColor(Styles.ContrastBackgroundColor) + box.SetRect(0, 0, StringWidth(label)+4, 1) + return &Button{ + Box: box, + label: label, + labelColor: Styles.PrimaryTextColor, + labelColorActivated: Styles.InverseTextColor, + backgroundColorActivated: Styles.PrimaryTextColor, + } +} + +// SetLabel sets the button text. +func (b *Button) SetLabel(label string) *Button { + b.label = label + return b +} + +// GetLabel returns the button text. +func (b *Button) GetLabel() string { + return b.label +} + +// SetLabelColor sets the color of the button text. +func (b *Button) SetLabelColor(color tcell.Color) *Button { + b.labelColor = color + return b +} + +// SetLabelColorActivated sets the color of the button text when the button is +// in focus. +func (b *Button) SetLabelColorActivated(color tcell.Color) *Button { + b.labelColorActivated = color + return b +} + +// SetBackgroundColorActivated sets the background color of the button text when +// the button is in focus. +func (b *Button) SetBackgroundColorActivated(color tcell.Color) *Button { + b.backgroundColorActivated = color + return b +} + +// SetSelectedFunc sets a handler which is called when the button was selected. +func (b *Button) SetSelectedFunc(handler func()) *Button { + b.selected = handler + return b +} + +// SetBlurFunc sets a handler which is called when the user leaves the button. +// The callback function is provided with the key that was pressed, which is one +// of the following: +// +// - KeyEscape: Leaving the button with no specific direction. +// - KeyTab: Move to the next field. +// - KeyBacktab: Move to the previous field. +func (b *Button) SetBlurFunc(handler func(key tcell.Key)) *Button { + b.blur = handler + return b +} + +// Draw draws this primitive onto the screen. 
+func (b *Button) Draw(screen tcell.Screen) { + // Draw the box. + borderColor := b.borderColor + backgroundColor := b.backgroundColor + if b.focus.HasFocus() { + b.backgroundColor = b.backgroundColorActivated + b.borderColor = b.labelColorActivated + defer func() { + b.borderColor = borderColor + }() + } + b.Box.Draw(screen) + b.backgroundColor = backgroundColor + + // Draw label. + x, y, width, height := b.GetInnerRect() + if width > 0 && height > 0 { + y = y + height/2 + labelColor := b.labelColor + if b.focus.HasFocus() { + labelColor = b.labelColorActivated + } + Print(screen, b.label, x, y, width, AlignCenter, labelColor) + } +} + +// InputHandler returns the handler for this primitive. +func (b *Button) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return b.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + // Process key event. + switch key := event.Key(); key { + case tcell.KeyEnter: // Selected. + if b.selected != nil { + b.selected() + } + case tcell.KeyBacktab, tcell.KeyTab, tcell.KeyEscape: // Leave. No action. + if b.blur != nil { + b.blur(key) + } + } + }) +} diff --git a/vendor/github.com/rivo/tview/checkbox.go b/vendor/github.com/rivo/tview/checkbox.go new file mode 100644 index 00000000000..8f099d8b6d5 --- /dev/null +++ b/vendor/github.com/rivo/tview/checkbox.go @@ -0,0 +1,203 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Checkbox implements a simple box for boolean values which can be checked and +// unchecked. +// +// See https://github.com/rivo/tview/wiki/Checkbox for an example. +type Checkbox struct { + *Box + + // Whether or not this box is checked. + checked bool + + // The text to be displayed before the input area. + label string + + // The screen width of the label area. A value of 0 means use the width of + // the label text. + labelWidth int + + // The label color. + labelColor tcell.Color + + // The background color of the input area. 
+ fieldBackgroundColor tcell.Color + + // The text color of the input area. + fieldTextColor tcell.Color + + // An optional function which is called when the user changes the checked + // state of this checkbox. + changed func(checked bool) + + // An optional function which is called when the user indicated that they + // are done entering text. The key which was pressed is provided (tab, + // shift-tab, or escape). + done func(tcell.Key) + + // A callback function set by the Form class and called when the user leaves + // this form item. + finished func(tcell.Key) +} + +// NewCheckbox returns a new input field. +func NewCheckbox() *Checkbox { + return &Checkbox{ + Box: NewBox(), + labelColor: Styles.SecondaryTextColor, + fieldBackgroundColor: Styles.ContrastBackgroundColor, + fieldTextColor: Styles.PrimaryTextColor, + } +} + +// SetChecked sets the state of the checkbox. +func (c *Checkbox) SetChecked(checked bool) *Checkbox { + c.checked = checked + return c +} + +// IsChecked returns whether or not the box is checked. +func (c *Checkbox) IsChecked() bool { + return c.checked +} + +// SetLabel sets the text to be displayed before the input area. +func (c *Checkbox) SetLabel(label string) *Checkbox { + c.label = label + return c +} + +// GetLabel returns the text to be displayed before the input area. +func (c *Checkbox) GetLabel() string { + return c.label +} + +// SetLabelWidth sets the screen width of the label. A value of 0 will cause the +// primitive to use the width of the label string. +func (c *Checkbox) SetLabelWidth(width int) *Checkbox { + c.labelWidth = width + return c +} + +// SetLabelColor sets the color of the label. +func (c *Checkbox) SetLabelColor(color tcell.Color) *Checkbox { + c.labelColor = color + return c +} + +// SetFieldBackgroundColor sets the background color of the input area. 
+func (c *Checkbox) SetFieldBackgroundColor(color tcell.Color) *Checkbox { + c.fieldBackgroundColor = color + return c +} + +// SetFieldTextColor sets the text color of the input area. +func (c *Checkbox) SetFieldTextColor(color tcell.Color) *Checkbox { + c.fieldTextColor = color + return c +} + +// SetFormAttributes sets attributes shared by all form items. +func (c *Checkbox) SetFormAttributes(labelWidth int, labelColor, bgColor, fieldTextColor, fieldBgColor tcell.Color) FormItem { + c.labelWidth = labelWidth + c.labelColor = labelColor + c.backgroundColor = bgColor + c.fieldTextColor = fieldTextColor + c.fieldBackgroundColor = fieldBgColor + return c +} + +// GetFieldWidth returns this primitive's field width. +func (c *Checkbox) GetFieldWidth() int { + return 1 +} + +// SetChangedFunc sets a handler which is called when the checked state of this +// checkbox was changed by the user. The handler function receives the new +// state. +func (c *Checkbox) SetChangedFunc(handler func(checked bool)) *Checkbox { + c.changed = handler + return c +} + +// SetDoneFunc sets a handler which is called when the user is done using the +// checkbox. The callback function is provided with the key that was pressed, +// which is one of the following: +// +// - KeyEscape: Abort text input. +// - KeyTab: Move to the next field. +// - KeyBacktab: Move to the previous field. +func (c *Checkbox) SetDoneFunc(handler func(key tcell.Key)) *Checkbox { + c.done = handler + return c +} + +// SetFinishedFunc sets a callback invoked when the user leaves this form item. +func (c *Checkbox) SetFinishedFunc(handler func(key tcell.Key)) FormItem { + c.finished = handler + return c +} + +// Draw draws this primitive onto the screen. +func (c *Checkbox) Draw(screen tcell.Screen) { + c.Box.Draw(screen) + + // Prepare + x, y, width, height := c.GetInnerRect() + rightLimit := x + width + if height < 1 || rightLimit <= x { + return + } + + // Draw label. 
+ if c.labelWidth > 0 { + labelWidth := c.labelWidth + if labelWidth > rightLimit-x { + labelWidth = rightLimit - x + } + Print(screen, c.label, x, y, labelWidth, AlignLeft, c.labelColor) + x += labelWidth + } else { + _, drawnWidth := Print(screen, c.label, x, y, rightLimit-x, AlignLeft, c.labelColor) + x += drawnWidth + } + + // Draw checkbox. + fieldStyle := tcell.StyleDefault.Background(c.fieldBackgroundColor).Foreground(c.fieldTextColor) + if c.focus.HasFocus() { + fieldStyle = fieldStyle.Background(c.fieldTextColor).Foreground(c.fieldBackgroundColor) + } + checkedRune := 'X' + if !c.checked { + checkedRune = ' ' + } + screen.SetContent(x, y, checkedRune, nil, fieldStyle) +} + +// InputHandler returns the handler for this primitive. +func (c *Checkbox) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return c.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + // Process key event. + switch key := event.Key(); key { + case tcell.KeyRune, tcell.KeyEnter: // Check. + if key == tcell.KeyRune && event.Rune() != ' ' { + break + } + c.checked = !c.checked + if c.changed != nil { + c.changed(c.checked) + } + case tcell.KeyTab, tcell.KeyBacktab, tcell.KeyEscape: // We're done. + if c.done != nil { + c.done(key) + } + if c.finished != nil { + c.finished(key) + } + } + }) +} diff --git a/vendor/github.com/rivo/tview/doc.go b/vendor/github.com/rivo/tview/doc.go new file mode 100644 index 00000000000..4bceea2a392 --- /dev/null +++ b/vendor/github.com/rivo/tview/doc.go @@ -0,0 +1,180 @@ +/* +Package tview implements rich widgets for terminal based user interfaces. The +widgets provided with this package are useful for data exploration and data +entry. + +Widgets + +The package implements the following widgets: + + - TextView: A scrollable window that display multi-colored text. Text may also + be highlighted. + - Table: A scrollable display of tabular data. Table cells, rows, or columns + may also be highlighted. 
+ - TreeView: A scrollable display for hierarchical data. Tree nodes can be + highlighted, collapsed, expanded, and more. + - List: A navigable text list with optional keyboard shortcuts. + - InputField: One-line input fields to enter text. + - DropDown: Drop-down selection fields. + - Checkbox: Selectable checkbox for boolean values. + - Button: Buttons which get activated when the user selects them. + - Form: Forms composed of input fields, drop down selections, checkboxes, and + buttons. + - Modal: A centered window with a text message and one or more buttons. + - Grid: A grid based layout manager. + - Flex: A Flexbox based layout manager. + - Pages: A page based layout manager. + +The package also provides Application which is used to poll the event queue and +draw widgets on screen. + +Hello World + +The following is a very basic example showing a box with the title "Hello, +world!": + + package main + + import ( + "github.com/rivo/tview" + ) + + func main() { + box := tview.NewBox().SetBorder(true).SetTitle("Hello, world!") + if err := tview.NewApplication().SetRoot(box, true).Run(); err != nil { + panic(err) + } + } + +First, we create a box primitive with a border and a title. Then we create an +application, set the box as its root primitive, and run the event loop. The +application exits when the application's Stop() function is called or when +Ctrl-C is pressed. + +If we have a primitive which consumes key presses, we call the application's +SetFocus() function to redirect all key presses to that primitive. Most +primitives then offer ways to install handlers that allow you to react to any +actions performed on them. + +More Demos + +You will find more demos in the "demos" subdirectory. It also contains a +presentation (written using tview) which gives an overview of the different +widgets and how they can be used. + +Colors + +Throughout this package, colors are specified using the tcell.Color type. 
+Functions such as tcell.GetColor(), tcell.NewHexColor(), and tcell.NewRGBColor() +can be used to create colors from W3C color names or RGB values. + +Almost all strings which are displayed can contain color tags. Color tags are +W3C color names or six hexadecimal digits following a hash tag, wrapped in +square brackets. Examples: + + This is a [red]warning[white]! + The sky is [#8080ff]blue[#ffffff]. + +A color tag changes the color of the characters following that color tag. This +applies to almost everything from box titles, list text, form item labels, to +table cells. In a TextView, this functionality has to be switched on explicitly. +See the TextView documentation for more information. + +Color tags may contain not just the foreground (text) color but also the +background color and additional flags. In fact, the full definition of a color +tag is as follows: + + [::] + +Each of the three fields can be left blank and trailing fields can be omitted. +(Empty square brackets "[]", however, are not considered color tags.) Colors +that are not specified will be left unchanged. A field with just a dash ("-") +means "reset to default". + +You can specify the following flags (some flags may not be supported by your +terminal): + + l: blink + b: bold + d: dim + r: reverse (switch foreground and background color) + u: underline + +Examples: + + [yellow]Yellow text + [yellow:red]Yellow text on red background + [:red]Red background, text color unchanged + [yellow::u]Yellow text underlined + [::bl]Bold, blinking text + [::-]Colors unchanged, flags reset + [-]Reset foreground color + [-:-:-]Reset everything + [:]No effect + []Not a valid color tag, will print square brackets as they are + +In the rare event that you want to display a string such as "[red]" or +"[#00ff1a]" without applying its effect, you need to put an opening square +bracket before the closing square bracket. Note that the text inside the +brackets will be matched less strictly than region or colors tags. 
I.e. any +character that may be used in color or region tags will be recognized. Examples: + + [red[] will be output as [red] + ["123"[] will be output as ["123"] + [#6aff00[[] will be output as [#6aff00[] + [a#"[[[] will be output as [a#"[[] + [] will be output as [] (see color tags above) + [[] will be output as [[] (not an escaped tag) + +You can use the Escape() function to insert brackets automatically where needed. + +Styles + +When primitives are instantiated, they are initialized with colors taken from +the global Styles variable. You may change this variable to adapt the look and +feel of the primitives to your preferred style. + +Unicode Support + +This package supports unicode characters including wide characters. + +Concurrency + +Many functions in this package are not thread-safe. For many applications, this +may not be an issue: If your code makes changes in response to key events, it +will execute in the main goroutine and thus will not cause any race conditions. + +If you access your primitives from other goroutines, however, you will need to +synchronize execution. The easiest way to do this is to call +Application.QueueUpdate() or Application.QueueUpdateDraw() (see the function +documentation for details): + + go func() { + app.QueueUpdateDraw(func() { + table.SetCellSimple(0, 0, "Foo bar") + }) + }() + +One exception to this is the io.Writer interface implemented by TextView. You +can safely write to a TextView from any goroutine. See the TextView +documentation for details. + +You can also call Application.Draw() from any goroutine without having to wrap +it in QueueUpdate(). And, as mentioned above, key event callbacks are executed +in the main goroutine and thus should not use QueueUpdate() as that may lead to +deadlocks. + +Type Hierarchy + +All widgets listed above contain the Box type. All of Box's functions are +therefore available for all widgets, too. + +All widgets also implement the Primitive interface. 
There is also the Focusable +interface which is used to override functions in subclassing types. + +The tview package is based on https://github.com/gdamore/tcell. It uses types +and constants from that package (e.g. colors and keyboard values). + +This package does not process mouse input (yet). +*/ +package tview diff --git a/vendor/github.com/rivo/tview/dropdown.go b/vendor/github.com/rivo/tview/dropdown.go new file mode 100644 index 00000000000..79be05ed11e --- /dev/null +++ b/vendor/github.com/rivo/tview/dropdown.go @@ -0,0 +1,426 @@ +package tview + +import ( + "strings" + + "github.com/gdamore/tcell" + runewidth "github.com/mattn/go-runewidth" +) + +// dropDownOption is one option that can be selected in a drop-down primitive. +type dropDownOption struct { + Text string // The text to be displayed in the drop-down. + Selected func() // The (optional) callback for when this option was selected. +} + +// DropDown implements a selection widget whose options become visible in a +// drop-down list when activated. +// +// See https://github.com/rivo/tview/wiki/DropDown for an example. +type DropDown struct { + *Box + + // The options from which the user can choose. + options []*dropDownOption + + // The index of the currently selected option. Negative if no option is + // currently selected. + currentOption int + + // Set to true if the options are visible and selectable. + open bool + + // The runes typed so far to directly access one of the list items. + prefix string + + // The list element for the options. + list *List + + // The text to be displayed before the input area. + label string + + // The label color. + labelColor tcell.Color + + // The background color of the input area. + fieldBackgroundColor tcell.Color + + // The text color of the input area. + fieldTextColor tcell.Color + + // The color for prefixes. + prefixTextColor tcell.Color + + // The screen width of the label area. A value of 0 means use the width of + // the label text. 
+ labelWidth int + + // The screen width of the input area. A value of 0 means extend as much as + // possible. + fieldWidth int + + // An optional function which is called when the user indicated that they + // are done selecting options. The key which was pressed is provided (tab, + // shift-tab, or escape). + done func(tcell.Key) + + // A callback function set by the Form class and called when the user leaves + // this form item. + finished func(tcell.Key) + + // A callback function which is called when the user changes the drop-down's + // selection. + selected func(text string, index int) +} + +// NewDropDown returns a new drop-down. +func NewDropDown() *DropDown { + list := NewList().ShowSecondaryText(false) + list.SetMainTextColor(Styles.PrimitiveBackgroundColor). + SetSelectedTextColor(Styles.PrimitiveBackgroundColor). + SetSelectedBackgroundColor(Styles.PrimaryTextColor). + SetBackgroundColor(Styles.MoreContrastBackgroundColor) + + d := &DropDown{ + Box: NewBox(), + currentOption: -1, + list: list, + labelColor: Styles.SecondaryTextColor, + fieldBackgroundColor: Styles.ContrastBackgroundColor, + fieldTextColor: Styles.PrimaryTextColor, + prefixTextColor: Styles.ContrastSecondaryTextColor, + } + + d.focus = d + + return d +} + +// SetCurrentOption sets the index of the currently selected option. This may +// be a negative value to indicate that no option is currently selected. +func (d *DropDown) SetCurrentOption(index int) *DropDown { + d.currentOption = index + d.list.SetCurrentItem(index) + return d +} + +// GetCurrentOption returns the index of the currently selected option as well +// as its text. If no option was selected, -1 and an empty string is returned. +func (d *DropDown) GetCurrentOption() (int, string) { + var text string + if d.currentOption >= 0 && d.currentOption < len(d.options) { + text = d.options[d.currentOption].Text + } + return d.currentOption, text +} + +// SetLabel sets the text to be displayed before the input area. 
+func (d *DropDown) SetLabel(label string) *DropDown { + d.label = label + return d +} + +// GetLabel returns the text to be displayed before the input area. +func (d *DropDown) GetLabel() string { + return d.label +} + +// SetLabelWidth sets the screen width of the label. A value of 0 will cause the +// primitive to use the width of the label string. +func (d *DropDown) SetLabelWidth(width int) *DropDown { + d.labelWidth = width + return d +} + +// SetLabelColor sets the color of the label. +func (d *DropDown) SetLabelColor(color tcell.Color) *DropDown { + d.labelColor = color + return d +} + +// SetFieldBackgroundColor sets the background color of the options area. +func (d *DropDown) SetFieldBackgroundColor(color tcell.Color) *DropDown { + d.fieldBackgroundColor = color + return d +} + +// SetFieldTextColor sets the text color of the options area. +func (d *DropDown) SetFieldTextColor(color tcell.Color) *DropDown { + d.fieldTextColor = color + return d +} + +// SetPrefixTextColor sets the color of the prefix string. The prefix string is +// shown when the user starts typing text, which directly selects the first +// option that starts with the typed string. +func (d *DropDown) SetPrefixTextColor(color tcell.Color) *DropDown { + d.prefixTextColor = color + return d +} + +// SetFormAttributes sets attributes shared by all form items. +func (d *DropDown) SetFormAttributes(labelWidth int, labelColor, bgColor, fieldTextColor, fieldBgColor tcell.Color) FormItem { + d.labelWidth = labelWidth + d.labelColor = labelColor + d.backgroundColor = bgColor + d.fieldTextColor = fieldTextColor + d.fieldBackgroundColor = fieldBgColor + return d +} + +// SetFieldWidth sets the screen width of the options area. A value of 0 means +// extend to as long as the longest option text. +func (d *DropDown) SetFieldWidth(width int) *DropDown { + d.fieldWidth = width + return d +} + +// GetFieldWidth returns this primitive's field screen width. 
+func (d *DropDown) GetFieldWidth() int { + if d.fieldWidth > 0 { + return d.fieldWidth + } + fieldWidth := 0 + for _, option := range d.options { + width := StringWidth(option.Text) + if width > fieldWidth { + fieldWidth = width + } + } + return fieldWidth +} + +// AddOption adds a new selectable option to this drop-down. The "selected" +// callback is called when this option was selected. It may be nil. +func (d *DropDown) AddOption(text string, selected func()) *DropDown { + d.options = append(d.options, &dropDownOption{Text: text, Selected: selected}) + d.list.AddItem(text, "", 0, nil) + return d +} + +// SetOptions replaces all current options with the ones provided and installs +// one callback function which is called when one of the options is selected. +// It will be called with the option's text and its index into the options +// slice. The "selected" parameter may be nil. +func (d *DropDown) SetOptions(texts []string, selected func(text string, index int)) *DropDown { + d.list.Clear() + d.options = nil + for index, text := range texts { + func(t string, i int) { + d.AddOption(text, nil) + }(text, index) + } + d.selected = selected + return d +} + +// SetSelectedFunc sets a handler which is called when the user changes the +// drop-down's option. This handler will be called in addition and prior to +// an option's optional individual handler. The handler is provided with the +// selected option's text and index. +func (d *DropDown) SetSelectedFunc(handler func(text string, index int)) *DropDown { + d.selected = handler + return d +} + +// SetDoneFunc sets a handler which is called when the user is done selecting +// options. The callback function is provided with the key that was pressed, +// which is one of the following: +// +// - KeyEscape: Abort selection. +// - KeyTab: Move to the next field. +// - KeyBacktab: Move to the previous field. 
+func (d *DropDown) SetDoneFunc(handler func(key tcell.Key)) *DropDown { + d.done = handler + return d +} + +// SetFinishedFunc sets a callback invoked when the user leaves this form item. +func (d *DropDown) SetFinishedFunc(handler func(key tcell.Key)) FormItem { + d.finished = handler + return d +} + +// Draw draws this primitive onto the screen. +func (d *DropDown) Draw(screen tcell.Screen) { + d.Box.Draw(screen) + + // Prepare. + x, y, width, height := d.GetInnerRect() + rightLimit := x + width + if height < 1 || rightLimit <= x { + return + } + + // Draw label. + if d.labelWidth > 0 { + labelWidth := d.labelWidth + if labelWidth > rightLimit-x { + labelWidth = rightLimit - x + } + Print(screen, d.label, x, y, labelWidth, AlignLeft, d.labelColor) + x += labelWidth + } else { + _, drawnWidth := Print(screen, d.label, x, y, rightLimit-x, AlignLeft, d.labelColor) + x += drawnWidth + } + + // What's the longest option text? + maxWidth := 0 + for _, option := range d.options { + strWidth := StringWidth(option.Text) + if strWidth > maxWidth { + maxWidth = strWidth + } + } + + // Draw selection area. + fieldWidth := d.fieldWidth + if fieldWidth == 0 { + fieldWidth = maxWidth + } + if rightLimit-x < fieldWidth { + fieldWidth = rightLimit - x + } + fieldStyle := tcell.StyleDefault.Background(d.fieldBackgroundColor) + if d.GetFocusable().HasFocus() && !d.open { + fieldStyle = fieldStyle.Background(d.fieldTextColor) + } + for index := 0; index < fieldWidth; index++ { + screen.SetContent(x+index, y, ' ', nil, fieldStyle) + } + + // Draw selected text. + if d.open && len(d.prefix) > 0 { + // Show the prefix. 
+ Print(screen, d.prefix, x, y, fieldWidth, AlignLeft, d.prefixTextColor) + prefixWidth := runewidth.StringWidth(d.prefix) + listItemText := d.options[d.list.GetCurrentItem()].Text + if prefixWidth < fieldWidth && len(d.prefix) < len(listItemText) { + Print(screen, listItemText[len(d.prefix):], x+prefixWidth, y, fieldWidth-prefixWidth, AlignLeft, d.fieldTextColor) + } + } else { + if d.currentOption >= 0 && d.currentOption < len(d.options) { + color := d.fieldTextColor + // Just show the current selection. + if d.GetFocusable().HasFocus() && !d.open { + color = d.fieldBackgroundColor + } + Print(screen, d.options[d.currentOption].Text, x, y, fieldWidth, AlignLeft, color) + } + } + + // Draw options list. + if d.HasFocus() && d.open { + // We prefer to drop down but if there is no space, maybe drop up? + lx := x + ly := y + 1 + lwidth := maxWidth + lheight := len(d.options) + _, sheight := screen.Size() + if ly+lheight >= sheight && ly-2 > lheight-ly { + ly = y - lheight + if ly < 0 { + ly = 0 + } + } + if ly+lheight >= sheight { + lheight = sheight - ly + } + d.list.SetRect(lx, ly, lwidth, lheight) + d.list.Draw(screen) + } +} + +// InputHandler returns the handler for this primitive. +func (d *DropDown) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return d.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + // A helper function which selects an item in the drop-down list based on + // the current prefix. + evalPrefix := func() { + if len(d.prefix) > 0 { + for index, option := range d.options { + if strings.HasPrefix(strings.ToLower(option.Text), d.prefix) { + d.list.SetCurrentItem(index) + return + } + } + // Prefix does not match any item. Remove last rune. + r := []rune(d.prefix) + d.prefix = string(r[:len(r)-1]) + } + } + + // Process key event. 
+ switch key := event.Key(); key { + case tcell.KeyEnter, tcell.KeyRune, tcell.KeyDown: + d.prefix = "" + + // If the first key was a letter already, it becomes part of the prefix. + if r := event.Rune(); key == tcell.KeyRune && r != ' ' { + d.prefix += string(r) + evalPrefix() + } + + // Hand control over to the list. + d.open = true + optionBefore := d.currentOption + d.list.SetSelectedFunc(func(index int, mainText, secondaryText string, shortcut rune) { + // An option was selected. Close the list again. + d.open = false + setFocus(d) + d.currentOption = index + + // Trigger "selected" event. + if d.selected != nil { + d.selected(d.options[d.currentOption].Text, d.currentOption) + } + if d.options[d.currentOption].Selected != nil { + d.options[d.currentOption].Selected() + } + }).SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Key() == tcell.KeyRune { + d.prefix += string(event.Rune()) + evalPrefix() + } else if event.Key() == tcell.KeyBackspace || event.Key() == tcell.KeyBackspace2 { + if len(d.prefix) > 0 { + r := []rune(d.prefix) + d.prefix = string(r[:len(r)-1]) + } + evalPrefix() + } else if event.Key() == tcell.KeyEscape { + d.open = false + d.currentOption = optionBefore + setFocus(d) + } else { + d.prefix = "" + } + return event + }) + setFocus(d.list) + case tcell.KeyEscape, tcell.KeyTab, tcell.KeyBacktab: + if d.done != nil { + d.done(key) + } + if d.finished != nil { + d.finished(key) + } + } + }) +} + +// Focus is called by the application when the primitive receives focus. +func (d *DropDown) Focus(delegate func(p Primitive)) { + d.Box.Focus(delegate) + if d.open { + delegate(d.list) + } +} + +// HasFocus returns whether or not this primitive has focus. 
+func (d *DropDown) HasFocus() bool { + if d.open { + return d.list.HasFocus() + } + return d.hasFocus +} diff --git a/vendor/github.com/rivo/tview/flex.go b/vendor/github.com/rivo/tview/flex.go new file mode 100644 index 00000000000..7235614f26a --- /dev/null +++ b/vendor/github.com/rivo/tview/flex.go @@ -0,0 +1,193 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Configuration values. +const ( + FlexRow = iota + FlexColumn +) + +// flexItem holds layout options for one item. +type flexItem struct { + Item Primitive // The item to be positioned. May be nil for an empty item. + FixedSize int // The item's fixed size which may not be changed, 0 if it has no fixed size. + Proportion int // The item's proportion. + Focus bool // Whether or not this item attracts the layout's focus. +} + +// Flex is a basic implementation of the Flexbox layout. The contained +// primitives are arranged horizontally or vertically. The way they are +// distributed along that dimension depends on their layout settings, which is +// either a fixed length or a proportional length. See AddItem() for details. +// +// See https://github.com/rivo/tview/wiki/Flex for an example. +type Flex struct { + *Box + + // The items to be positioned. + items []*flexItem + + // FlexRow or FlexColumn. + direction int + + // If set to true, Flex will use the entire screen as its available space + // instead its box dimensions. + fullScreen bool +} + +// NewFlex returns a new flexbox layout container with no primitives and its +// direction set to FlexColumn. To add primitives to this layout, see AddItem(). +// To change the direction, see SetDirection(). +// +// Note that Box, the superclass of Flex, will have its background color set to +// transparent so that any nil flex items will leave their background unchanged. 
+// To clear a Flex's background before any items are drawn, set it to the +// desired color: +// +// flex.SetBackgroundColor(tview.Styles.PrimitiveBackgroundColor) +func NewFlex() *Flex { + f := &Flex{ + Box: NewBox().SetBackgroundColor(tcell.ColorDefault), + direction: FlexColumn, + } + f.focus = f + return f +} + +// SetDirection sets the direction in which the contained primitives are +// distributed. This can be either FlexColumn (default) or FlexRow. +func (f *Flex) SetDirection(direction int) *Flex { + f.direction = direction + return f +} + +// SetFullScreen sets the flag which, when true, causes the flex layout to use +// the entire screen space instead of whatever size it is currently assigned to. +func (f *Flex) SetFullScreen(fullScreen bool) *Flex { + f.fullScreen = fullScreen + return f +} + +// AddItem adds a new item to the container. The "fixedSize" argument is a width +// or height that may not be changed by the layout algorithm. A value of 0 means +// that its size is flexible and may be changed. The "proportion" argument +// defines the relative size of the item compared to other flexible-size items. +// For example, items with a proportion of 2 will be twice as large as items +// with a proportion of 1. The proportion must be at least 1 if fixedSize == 0 +// (ignored otherwise). +// +// If "focus" is set to true, the item will receive focus when the Flex +// primitive receives focus. If multiple items have the "focus" flag set to +// true, the first one will receive focus. +// +// You can provide a nil value for the primitive. This will still consume screen +// space but nothing will be drawn. +func (f *Flex) AddItem(item Primitive, fixedSize, proportion int, focus bool) *Flex { + f.items = append(f.items, &flexItem{Item: item, FixedSize: fixedSize, Proportion: proportion, Focus: focus}) + return f +} + +// RemoveItem removes all items for the given primitive from the container, +// keeping the order of the remaining items intact. 
+func (f *Flex) RemoveItem(p Primitive) *Flex { + for index := len(f.items) - 1; index >= 0; index-- { + if f.items[index].Item == p { + f.items = append(f.items[:index], f.items[index+1:]...) + } + } + return f +} + +// ResizeItem sets a new size for the item(s) with the given primitive. If there +// are multiple Flex items with the same primitive, they will all receive the +// same size. For details regarding the size parameters, see AddItem(). +func (f *Flex) ResizeItem(p Primitive, fixedSize, proportion int) *Flex { + for _, item := range f.items { + if item.Item == p { + item.FixedSize = fixedSize + item.Proportion = proportion + } + } + return f +} + +// Draw draws this primitive onto the screen. +func (f *Flex) Draw(screen tcell.Screen) { + f.Box.Draw(screen) + + // Calculate size and position of the items. + + // Do we use the entire screen? + if f.fullScreen { + width, height := screen.Size() + f.SetRect(0, 0, width, height) + } + + // How much space can we distribute? + x, y, width, height := f.GetInnerRect() + var proportionSum int + distSize := width + if f.direction == FlexRow { + distSize = height + } + for _, item := range f.items { + if item.FixedSize > 0 { + distSize -= item.FixedSize + } else { + proportionSum += item.Proportion + } + } + + // Calculate positions and draw items. + pos := x + if f.direction == FlexRow { + pos = y + } + for _, item := range f.items { + size := item.FixedSize + if size <= 0 { + size = distSize * item.Proportion / proportionSum + distSize -= size + proportionSum -= item.Proportion + } + if item.Item != nil { + if f.direction == FlexColumn { + item.Item.SetRect(pos, y, size, height) + } else { + item.Item.SetRect(x, pos, width, size) + } + } + pos += size + + if item.Item != nil { + if item.Item.GetFocusable().HasFocus() { + defer item.Item.Draw(screen) + } else { + item.Item.Draw(screen) + } + } + } +} + +// Focus is called when this primitive receives focus. 
+func (f *Flex) Focus(delegate func(p Primitive)) { + for _, item := range f.items { + if item.Item != nil && item.Focus { + delegate(item.Item) + return + } + } +} + +// HasFocus returns whether or not this primitive has focus. +func (f *Flex) HasFocus() bool { + for _, item := range f.items { + if item.Item != nil && item.Item.GetFocusable().HasFocus() { + return true + } + } + return false +} diff --git a/vendor/github.com/rivo/tview/focusable.go b/vendor/github.com/rivo/tview/focusable.go new file mode 100644 index 00000000000..99fdaaf4eff --- /dev/null +++ b/vendor/github.com/rivo/tview/focusable.go @@ -0,0 +1,8 @@ +package tview + +// Focusable provides a method which determines if a primitive has focus. +// Composed primitives may be focused based on the focused state of their +// contained primitives. +type Focusable interface { + HasFocus() bool +} diff --git a/vendor/github.com/rivo/tview/form.go b/vendor/github.com/rivo/tview/form.go new file mode 100644 index 00000000000..aaa9ed7e11d --- /dev/null +++ b/vendor/github.com/rivo/tview/form.go @@ -0,0 +1,569 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// DefaultFormFieldWidth is the default field screen width of form elements +// whose field width is flexible (0). This is used in the Form class for +// horizontal layouts. +var DefaultFormFieldWidth = 10 + +// FormItem is the interface all form items must implement to be able to be +// included in a form. +type FormItem interface { + Primitive + + // GetLabel returns the item's label text. + GetLabel() string + + // SetFormAttributes sets a number of item attributes at once. + SetFormAttributes(labelWidth int, labelColor, bgColor, fieldTextColor, fieldBgColor tcell.Color) FormItem + + // GetFieldWidth returns the width of the form item's field (the area which + // is manipulated by the user) in number of screen cells. A value of 0 + // indicates the the field width is flexible and may use as much space as + // required. 
+ GetFieldWidth() int + + // SetFinishedFunc sets the handler function for when the user finished + // entering data into the item. The handler may receive events for the + // Enter key (we're done), the Escape key (cancel input), the Tab key (move to + // next field), and the Backtab key (move to previous field). + SetFinishedFunc(handler func(key tcell.Key)) FormItem +} + +// Form allows you to combine multiple one-line form elements into a vertical +// or horizontal layout. Form elements include types such as InputField or +// Checkbox. These elements can be optionally followed by one or more buttons +// for which you can define form-wide actions (e.g. Save, Clear, Cancel). +// +// See https://github.com/rivo/tview/wiki/Form for an example. +type Form struct { + *Box + + // The items of the form (one row per item). + items []FormItem + + // The buttons of the form. + buttons []*Button + + // If set to true, instead of position items and buttons from top to bottom, + // they are positioned from left to right. + horizontal bool + + // The alignment of the buttons. + buttonsAlign int + + // The number of empty rows between items. + itemPadding int + + // The index of the item or button which has focus. (Items are counted first, + // buttons are counted last.) + focusedElement int + + // The label color. + labelColor tcell.Color + + // The background color of the input area. + fieldBackgroundColor tcell.Color + + // The text color of the input area. + fieldTextColor tcell.Color + + // The background color of the buttons. + buttonBackgroundColor tcell.Color + + // The color of the button text. + buttonTextColor tcell.Color + + // An optional function which is called when the user hits Escape. + cancel func() +} + +// NewForm returns a new form. 
+func NewForm() *Form { + box := NewBox().SetBorderPadding(1, 1, 1, 1) + + f := &Form{ + Box: box, + itemPadding: 1, + labelColor: Styles.SecondaryTextColor, + fieldBackgroundColor: Styles.ContrastBackgroundColor, + fieldTextColor: Styles.PrimaryTextColor, + buttonBackgroundColor: Styles.ContrastBackgroundColor, + buttonTextColor: Styles.PrimaryTextColor, + } + + f.focus = f + + return f +} + +// SetItemPadding sets the number of empty rows between form items for vertical +// layouts and the number of empty cells between form items for horizontal +// layouts. +func (f *Form) SetItemPadding(padding int) *Form { + f.itemPadding = padding + return f +} + +// SetHorizontal sets the direction the form elements are laid out. If set to +// true, instead of positioning them from top to bottom (the default), they are +// positioned from left to right, moving into the next row if there is not +// enough space. +func (f *Form) SetHorizontal(horizontal bool) *Form { + f.horizontal = horizontal + return f +} + +// SetLabelColor sets the color of the labels. +func (f *Form) SetLabelColor(color tcell.Color) *Form { + f.labelColor = color + return f +} + +// SetFieldBackgroundColor sets the background color of the input areas. +func (f *Form) SetFieldBackgroundColor(color tcell.Color) *Form { + f.fieldBackgroundColor = color + return f +} + +// SetFieldTextColor sets the text color of the input areas. +func (f *Form) SetFieldTextColor(color tcell.Color) *Form { + f.fieldTextColor = color + return f +} + +// SetButtonsAlign sets how the buttons align horizontally, one of AlignLeft +// (the default), AlignCenter, and AlignRight. This is only +func (f *Form) SetButtonsAlign(align int) *Form { + f.buttonsAlign = align + return f +} + +// SetButtonBackgroundColor sets the background color of the buttons. +func (f *Form) SetButtonBackgroundColor(color tcell.Color) *Form { + f.buttonBackgroundColor = color + return f +} + +// SetButtonTextColor sets the color of the button texts. 
+func (f *Form) SetButtonTextColor(color tcell.Color) *Form { + f.buttonTextColor = color + return f +} + +// AddInputField adds an input field to the form. It has a label, an optional +// initial value, a field width (a value of 0 extends it as far as possible), +// an optional accept function to validate the item's value (set to nil to +// accept any text), and an (optional) callback function which is invoked when +// the input field's text has changed. +func (f *Form) AddInputField(label, value string, fieldWidth int, accept func(textToCheck string, lastChar rune) bool, changed func(text string)) *Form { + f.items = append(f.items, NewInputField(). + SetLabel(label). + SetText(value). + SetFieldWidth(fieldWidth). + SetAcceptanceFunc(accept). + SetChangedFunc(changed)) + return f +} + +// AddPasswordField adds a password field to the form. This is similar to an +// input field except that the user's input not shown. Instead, a "mask" +// character is displayed. The password field has a label, an optional initial +// value, a field width (a value of 0 extends it as far as possible), and an +// (optional) callback function which is invoked when the input field's text has +// changed. +func (f *Form) AddPasswordField(label, value string, fieldWidth int, mask rune, changed func(text string)) *Form { + if mask == 0 { + mask = '*' + } + f.items = append(f.items, NewInputField(). + SetLabel(label). + SetText(value). + SetFieldWidth(fieldWidth). + SetMaskCharacter(mask). + SetChangedFunc(changed)) + return f +} + +// AddDropDown adds a drop-down element to the form. It has a label, options, +// and an (optional) callback function which is invoked when an option was +// selected. The initial option may be a negative value to indicate that no +// option is currently selected. +func (f *Form) AddDropDown(label string, options []string, initialOption int, selected func(option string, optionIndex int)) *Form { + f.items = append(f.items, NewDropDown(). + SetLabel(label). 
+ SetCurrentOption(initialOption). + SetOptions(options, selected)) + return f +} + +// AddCheckbox adds a checkbox to the form. It has a label, an initial state, +// and an (optional) callback function which is invoked when the state of the +// checkbox was changed by the user. +func (f *Form) AddCheckbox(label string, checked bool, changed func(checked bool)) *Form { + f.items = append(f.items, NewCheckbox(). + SetLabel(label). + SetChecked(checked). + SetChangedFunc(changed)) + return f +} + +// AddButton adds a new button to the form. The "selected" function is called +// when the user selects this button. It may be nil. +func (f *Form) AddButton(label string, selected func()) *Form { + f.buttons = append(f.buttons, NewButton(label).SetSelectedFunc(selected)) + return f +} + +// GetButton returns the button at the specified 0-based index. Note that +// buttons have been specially prepared for this form and modifying some of +// their attributes may have unintended side effects. +func (f *Form) GetButton(index int) *Button { + return f.buttons[index] +} + +// RemoveButton removes the button at the specified position, starting with 0 +// for the button that was added first. +func (f *Form) RemoveButton(index int) *Form { + f.buttons = append(f.buttons[:index], f.buttons[index+1:]...) + return f +} + +// GetButtonCount returns the number of buttons in this form. +func (f *Form) GetButtonCount() int { + return len(f.buttons) +} + +// GetButtonIndex returns the index of the button with the given label, starting +// with 0 for the button that was added first. If no such label was found, -1 +// is returned. +func (f *Form) GetButtonIndex(label string) int { + for index, button := range f.buttons { + if button.GetLabel() == label { + return index + } + } + return -1 +} + +// Clear removes all input elements from the form, including the buttons if +// specified. 
+func (f *Form) Clear(includeButtons bool) *Form { + f.items = nil + if includeButtons { + f.buttons = nil + } + f.focusedElement = 0 + return f +} + +// AddFormItem adds a new item to the form. This can be used to add your own +// objects to the form. Note, however, that the Form class will override some +// of its attributes to make it work in the form context. Specifically, these +// are: +// +// - The label width +// - The label color +// - The background color +// - The field text color +// - The field background color +func (f *Form) AddFormItem(item FormItem) *Form { + f.items = append(f.items, item) + return f +} + +// GetFormItem returns the form element at the given position, starting with +// index 0. Elements are referenced in the order they were added. Buttons are +// not included. +func (f *Form) GetFormItem(index int) FormItem { + return f.items[index] +} + +// RemoveFormItem removes the form element at the given position, starting with +// index 0. Elements are referenced in the order they were added. Buttons are +// not included. +func (f *Form) RemoveFormItem(index int) *Form { + f.items = append(f.items[:index], f.items[index+1:]...) + return f +} + +// GetFormItemByLabel returns the first form element with the given label. If +// no such element is found, nil is returned. Buttons are not searched and will +// therefore not be returned. +func (f *Form) GetFormItemByLabel(label string) FormItem { + for _, item := range f.items { + if item.GetLabel() == label { + return item + } + } + return nil +} + +// GetFormItemIndex returns the index of the first form element with the given +// label. If no such element is found, -1 is returned. Buttons are not searched +// and will therefore not be returned. +func (f *Form) GetFormItemIndex(label string) int { + for index, item := range f.items { + if item.GetLabel() == label { + return index + } + } + return -1 +} + +// SetCancelFunc sets a handler which is called when the user hits the Escape +// key. 
+func (f *Form) SetCancelFunc(callback func()) *Form { + f.cancel = callback + return f +} + +// Draw draws this primitive onto the screen. +func (f *Form) Draw(screen tcell.Screen) { + f.Box.Draw(screen) + + // Determine the dimensions. + x, y, width, height := f.GetInnerRect() + topLimit := y + bottomLimit := y + height + rightLimit := x + width + startX := x + + // Find the longest label. + var maxLabelWidth int + for _, item := range f.items { + labelWidth := StringWidth(item.GetLabel()) + if labelWidth > maxLabelWidth { + maxLabelWidth = labelWidth + } + } + maxLabelWidth++ // Add one space. + + // Calculate positions of form items. + positions := make([]struct{ x, y, width, height int }, len(f.items)+len(f.buttons)) + var focusedPosition struct{ x, y, width, height int } + for index, item := range f.items { + // Calculate the space needed. + labelWidth := StringWidth(item.GetLabel()) + var itemWidth int + if f.horizontal { + fieldWidth := item.GetFieldWidth() + if fieldWidth == 0 { + fieldWidth = DefaultFormFieldWidth + } + labelWidth++ + itemWidth = labelWidth + fieldWidth + } else { + // We want all fields to align vertically. + labelWidth = maxLabelWidth + itemWidth = width + } + + // Advance to next line if there is no space. + if f.horizontal && x+labelWidth+1 >= rightLimit { + x = startX + y += 2 + } + + // Adjust the item's attributes. + if x+itemWidth >= rightLimit { + itemWidth = rightLimit - x + } + item.SetFormAttributes( + labelWidth, + f.labelColor, + f.backgroundColor, + f.fieldTextColor, + f.fieldBackgroundColor, + ) + + // Save position. + positions[index].x = x + positions[index].y = y + positions[index].width = itemWidth + positions[index].height = 1 + if item.GetFocusable().HasFocus() { + focusedPosition = positions[index] + } + + // Advance to next item. + if f.horizontal { + x += itemWidth + f.itemPadding + } else { + y += 1 + f.itemPadding + } + } + + // How wide are the buttons? 
+ buttonWidths := make([]int, len(f.buttons)) + buttonsWidth := 0 + for index, button := range f.buttons { + w := StringWidth(button.GetLabel()) + 4 + buttonWidths[index] = w + buttonsWidth += w + 1 + } + buttonsWidth-- + + // Where do we place them? + if !f.horizontal && x+buttonsWidth < rightLimit { + if f.buttonsAlign == AlignRight { + x = rightLimit - buttonsWidth + } else if f.buttonsAlign == AlignCenter { + x = (x + rightLimit - buttonsWidth) / 2 + } + + // In vertical layouts, buttons always appear after an empty line. + if f.itemPadding == 0 { + y++ + } + } + + // Calculate positions of buttons. + for index, button := range f.buttons { + space := rightLimit - x + buttonWidth := buttonWidths[index] + if f.horizontal { + if space < buttonWidth-4 { + x = startX + y += 2 + space = width + } + } else { + if space < 1 { + break // No space for this button anymore. + } + } + if buttonWidth > space { + buttonWidth = space + } + button.SetLabelColor(f.buttonTextColor). + SetLabelColorActivated(f.buttonBackgroundColor). + SetBackgroundColorActivated(f.buttonTextColor). + SetBackgroundColor(f.buttonBackgroundColor) + + buttonIndex := index + len(f.items) + positions[buttonIndex].x = x + positions[buttonIndex].y = y + positions[buttonIndex].width = buttonWidth + positions[buttonIndex].height = 1 + + if button.HasFocus() { + focusedPosition = positions[buttonIndex] + } + + x += buttonWidth + 1 + } + + // Determine vertical offset based on the position of the focused item. + var offset int + if focusedPosition.y+focusedPosition.height > bottomLimit { + offset = focusedPosition.y + focusedPosition.height - bottomLimit + if focusedPosition.y-offset < topLimit { + offset = focusedPosition.y - topLimit + } + } + + // Draw items. + for index, item := range f.items { + // Set position. + y := positions[index].y - offset + height := positions[index].height + item.SetRect(positions[index].x, y, positions[index].width, height) + + // Is this item visible? 
+ if y+height <= topLimit || y >= bottomLimit { + continue + } + + // Draw items with focus last (in case of overlaps). + if item.GetFocusable().HasFocus() { + defer item.Draw(screen) + } else { + item.Draw(screen) + } + } + + // Draw buttons. + for index, button := range f.buttons { + // Set position. + buttonIndex := index + len(f.items) + y := positions[buttonIndex].y - offset + height := positions[buttonIndex].height + button.SetRect(positions[buttonIndex].x, y, positions[buttonIndex].width, height) + + // Is this button visible? + if y+height <= topLimit || y >= bottomLimit { + continue + } + + // Draw button. + button.Draw(screen) + } +} + +// Focus is called by the application when the primitive receives focus. +func (f *Form) Focus(delegate func(p Primitive)) { + if len(f.items)+len(f.buttons) == 0 { + f.hasFocus = true + return + } + f.hasFocus = false + + // Hand on the focus to one of our child elements. + if f.focusedElement < 0 || f.focusedElement >= len(f.items)+len(f.buttons) { + f.focusedElement = 0 + } + handler := func(key tcell.Key) { + switch key { + case tcell.KeyTab, tcell.KeyEnter: + f.focusedElement++ + f.Focus(delegate) + case tcell.KeyBacktab: + f.focusedElement-- + if f.focusedElement < 0 { + f.focusedElement = len(f.items) + len(f.buttons) - 1 + } + f.Focus(delegate) + case tcell.KeyEscape: + if f.cancel != nil { + f.cancel() + } else { + f.focusedElement = 0 + f.Focus(delegate) + } + } + } + + if f.focusedElement < len(f.items) { + // We're selecting an item. + item := f.items[f.focusedElement] + item.SetFinishedFunc(handler) + delegate(item) + } else { + // We're selecting a button. + button := f.buttons[f.focusedElement-len(f.items)] + button.SetBlurFunc(handler) + delegate(button) + } +} + +// HasFocus returns whether or not this primitive has focus. 
+func (f *Form) HasFocus() bool { + if f.hasFocus { + return true + } + for _, item := range f.items { + if item.GetFocusable().HasFocus() { + return true + } + } + for _, button := range f.buttons { + if button.focus.HasFocus() { + return true + } + } + return false +} diff --git a/vendor/github.com/rivo/tview/frame.go b/vendor/github.com/rivo/tview/frame.go new file mode 100644 index 00000000000..77c5316e395 --- /dev/null +++ b/vendor/github.com/rivo/tview/frame.go @@ -0,0 +1,157 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// frameText holds information about a line of text shown in the frame. +type frameText struct { + Text string // The text to be displayed. + Header bool // true = place in header, false = place in footer. + Align int // One of the Align constants. + Color tcell.Color // The text color. +} + +// Frame is a wrapper which adds a border around another primitive. The top area +// (header) and the bottom area (footer) may also contain text. +// +// See https://github.com/rivo/tview/wiki/Frame for an example. +type Frame struct { + *Box + + // The contained primitive. + primitive Primitive + + // The lines of text to be displayed. + text []*frameText + + // Border spacing. + top, bottom, header, footer, left, right int +} + +// NewFrame returns a new frame around the given primitive. The primitive's +// size will be changed to fit within this frame. +func NewFrame(primitive Primitive) *Frame { + box := NewBox() + + f := &Frame{ + Box: box, + primitive: primitive, + top: 1, + bottom: 1, + header: 1, + footer: 1, + left: 1, + right: 1, + } + + f.focus = f + + return f +} + +// AddText adds text to the frame. Set "header" to true if the text is to appear +// in the header, above the contained primitive. Set it to false for it to +// appear in the footer, below the contained primitive. "align" must be one of +// the Align constants. Rows in the header are printed top to bottom, rows in +// the footer are printed bottom to top. 
Note that long text can overlap as +// different alignments will be placed on the same row. +func (f *Frame) AddText(text string, header bool, align int, color tcell.Color) *Frame { + f.text = append(f.text, &frameText{ + Text: text, + Header: header, + Align: align, + Color: color, + }) + return f +} + +// Clear removes all text from the frame. +func (f *Frame) Clear() *Frame { + f.text = nil + return f +} + +// SetBorders sets the width of the frame borders as well as "header" and +// "footer", the vertical space between the header and footer text and the +// contained primitive (does not apply if there is no text). +func (f *Frame) SetBorders(top, bottom, header, footer, left, right int) *Frame { + f.top, f.bottom, f.header, f.footer, f.left, f.right = top, bottom, header, footer, left, right + return f +} + +// Draw draws this primitive onto the screen. +func (f *Frame) Draw(screen tcell.Screen) { + f.Box.Draw(screen) + + // Calculate start positions. + x, top, width, height := f.GetInnerRect() + bottom := top + height - 1 + x += f.left + top += f.top + bottom -= f.bottom + width -= f.left + f.right + if width <= 0 || top >= bottom { + return // No space left. + } + + // Draw text. + var rows [6]int // top-left, top-center, top-right, bottom-left, bottom-center, bottom-right. + topMax := top + bottomMin := bottom + for _, text := range f.text { + // Where do we place this text? + var y int + if text.Header { + y = top + rows[text.Align] + rows[text.Align]++ + if y >= bottomMin { + continue + } + if y+1 > topMax { + topMax = y + 1 + } + } else { + y = bottom - rows[3+text.Align] + rows[3+text.Align]++ + if y <= topMax { + continue + } + if y-1 < bottomMin { + bottomMin = y - 1 + } + } + + // Draw text. + Print(screen, text.Text, x, y, width, text.Align, text.Color) + } + + // Set the size of the contained primitive. 
+ if topMax > top { + top = topMax + f.header + } + if bottomMin < bottom { + bottom = bottomMin - f.footer + } + if top > bottom { + return // No space for the primitive. + } + f.primitive.SetRect(x, top, width, bottom+1-top) + + // Finally, draw the contained primitive. + f.primitive.Draw(screen) +} + +// Focus is called when this primitive receives focus. +func (f *Frame) Focus(delegate func(p Primitive)) { + delegate(f.primitive) +} + +// HasFocus returns whether or not this primitive has focus. +func (f *Frame) HasFocus() bool { + focusable, ok := f.primitive.(Focusable) + if ok { + return focusable.HasFocus() + } + return false +} diff --git a/vendor/github.com/rivo/tview/grid.go b/vendor/github.com/rivo/tview/grid.go new file mode 100644 index 00000000000..d0c460f9360 --- /dev/null +++ b/vendor/github.com/rivo/tview/grid.go @@ -0,0 +1,632 @@ +package tview + +import ( + "math" + + "github.com/gdamore/tcell" +) + +// gridItem represents one primitive and its possible position on a grid. +type gridItem struct { + Item Primitive // The item to be positioned. May be nil for an empty item. + Row, Column int // The top-left grid cell where the item is placed. + Width, Height int // The number of rows and columns the item occupies. + MinGridWidth, MinGridHeight int // The minimum grid width/height for which this item is visible. + Focus bool // Whether or not this item attracts the layout's focus. + + visible bool // Whether or not this item was visible the last time the grid was drawn. + x, y, w, h int // The last position of the item relative to the top-left corner of the grid. Undefined if visible is false. +} + +// Grid is an implementation of a grid-based layout. It works by defining the +// size of the rows and columns, then placing primitives into the grid. +// +// Some settings can lead to the grid exceeding its available space. SetOffset() +// can then be used to scroll in steps of rows and columns. 
These offset values +// can also be controlled with the arrow keys (or the "g","G", "j", "k", "h", +// and "l" keys) while the grid has focus and none of its contained primitives +// do. +// +// See https://github.com/rivo/tview/wiki/Grid for an example. +type Grid struct { + *Box + + // The items to be positioned. + items []*gridItem + + // The definition of the rows and columns of the grid. See + // SetRows()/SetColumns() for details. + rows, columns []int + + // The minimum sizes for rows and columns. + minWidth, minHeight int + + // The size of the gaps between neighboring primitives. This is automatically + // set to 1 if borders is true. + gapRows, gapColumns int + + // The number of rows and columns skipped before drawing the top-left corner + // of the grid. + rowOffset, columnOffset int + + // Whether or not borders are drawn around grid items. If this is set to true, + // a gap size of 1 is automatically assumed (which is filled with the border + // graphics). + borders bool + + // The color of the borders around grid items. + bordersColor tcell.Color +} + +// NewGrid returns a new grid-based layout container with no initial primitives. +// +// Note that Box, the superclass of Grid, will have its background color set to +// transparent so that any grid areas not covered by any primitives will leave +// their background unchanged. To clear a Grid's background before any items are +// drawn, set it to the desired color: +// +// grid.SetBackgroundColor(tview.Styles.PrimitiveBackgroundColor) +func NewGrid() *Grid { + g := &Grid{ + Box: NewBox().SetBackgroundColor(tcell.ColorDefault), + bordersColor: Styles.GraphicsColor, + } + g.focus = g + return g +} + +// SetColumns defines how the columns of the grid are distributed. Each value +// defines the size of one column, starting with the leftmost column. Values +// greater 0 represent absolute column widths (gaps not included). 
Values less +// or equal 0 represent proportional column widths or fractions of the remaining +// free space, where 0 is treated the same as -1. That is, a column with a value +// of -3 will have three times the width of a column with a value of -1 (or 0). +// The minimum width set with SetMinSize() is always observed. +// +// Primitives may extend beyond the columns defined explicitly with this +// function. A value of 0 is assumed for any undefined column. In fact, if you +// never call this function, all columns occupied by primitives will have the +// same width. On the other hand, unoccupied columns defined with this function +// will always take their place. +// +// Assuming a total width of the grid of 100 cells and a minimum width of 0, the +// following call will result in columns with widths of 30, 10, 15, 15, and 30 +// cells: +// +// grid.Setcolumns(30, 10, -1, -1, -2) +// +// If a primitive were then placed in the 6th and 7th column, the resulting +// widths would be: 30, 10, 10, 10, 20, 10, and 10 cells. +// +// If you then called SetMinSize() as follows: +// +// grid.SetMinSize(15, 20) +// +// The resulting widths would be: 30, 15, 15, 15, 20, 15, and 15 cells, a total +// of 125 cells, 25 cells wider than the available grid width. +func (g *Grid) SetColumns(columns ...int) *Grid { + g.columns = columns + return g +} + +// SetRows defines how the rows of the grid are distributed. These values behave +// the same as the column values provided with SetColumns(), see there for a +// definition and examples. +// +// The provided values correspond to row heights, the first value defining +// the height of the topmost row. +func (g *Grid) SetRows(rows ...int) *Grid { + g.rows = rows + return g +} + +// SetSize is a shortcut for SetRows() and SetColumns() where all row and column +// values are set to the given size values. See SetRows() for details on sizes. 
+func (g *Grid) SetSize(numRows, numColumns, rowSize, columnSize int) *Grid { + g.rows = make([]int, numRows) + for index := range g.rows { + g.rows[index] = rowSize + } + g.columns = make([]int, numColumns) + for index := range g.columns { + g.columns[index] = columnSize + } + return g +} + +// SetMinSize sets an absolute minimum width for rows and an absolute minimum +// height for columns. Panics if negative values are provided. +func (g *Grid) SetMinSize(row, column int) *Grid { + if row < 0 || column < 0 { + panic("Invalid minimum row/column size") + } + g.minHeight, g.minWidth = row, column + return g +} + +// SetGap sets the size of the gaps between neighboring primitives on the grid. +// If borders are drawn (see SetBorders()), these values are ignored and a gap +// of 1 is assumed. Panics if negative values are provided. +func (g *Grid) SetGap(row, column int) *Grid { + if row < 0 || column < 0 { + panic("Invalid gap size") + } + g.gapRows, g.gapColumns = row, column + return g +} + +// SetBorders sets whether or not borders are drawn around grid items. Setting +// this value to true will cause the gap values (see SetGap()) to be ignored and +// automatically assumed to be 1 where the border graphics are drawn. +func (g *Grid) SetBorders(borders bool) *Grid { + g.borders = borders + return g +} + +// SetBordersColor sets the color of the item borders. +func (g *Grid) SetBordersColor(color tcell.Color) *Grid { + g.bordersColor = color + return g +} + +// AddItem adds a primitive and its position to the grid. The top-left corner +// of the primitive will be located in the top-left corner of the grid cell at +// the given row and column and will span "rowSpan" rows and "colSpan" columns. +// For example, for a primitive to occupy rows 2, 3, and 4 and columns 5 and 6: +// +// grid.AddItem(p, 2, 5, 3, 2, true) +// +// If rowSpan or colSpan is 0, the primitive will not be drawn. +// +// You can add the same primitive multiple times with different grid positions. 
+// The minGridWidth and minGridHeight values will then determine which of those +// positions will be used. This is similar to CSS media queries. These minimum +// values refer to the overall size of the grid. If multiple items for the same +// primitive apply, the one that has at least one highest minimum value will be +// used, or the primitive added last if those values are the same. Example: +// +// grid.AddItem(p, 0, 0, 0, 0, 0, 0, true). // Hide in small grids. +// AddItem(p, 0, 0, 1, 2, 100, 0, true). // One-column layout for medium grids. +// AddItem(p, 1, 1, 3, 2, 300, 0, true) // Multi-column layout for large grids. +// +// To use the same grid layout for all sizes, simply set minGridWidth and +// minGridHeight to 0. +// +// If the item's focus is set to true, it will receive focus when the grid +// receives focus. If there are multiple items with a true focus flag, the last +// visible one that was added will receive focus. +func (g *Grid) AddItem(p Primitive, row, column, rowSpan, colSpan, minGridHeight, minGridWidth int, focus bool) *Grid { + g.items = append(g.items, &gridItem{ + Item: p, + Row: row, + Column: column, + Height: rowSpan, + Width: colSpan, + MinGridHeight: minGridHeight, + MinGridWidth: minGridWidth, + Focus: focus, + }) + return g +} + +// RemoveItem removes all items for the given primitive from the grid, keeping +// the order of the remaining items intact. +func (g *Grid) RemoveItem(p Primitive) *Grid { + for index := len(g.items) - 1; index >= 0; index-- { + if g.items[index].Item == p { + g.items = append(g.items[:index], g.items[index+1:]...) + } + } + return g +} + +// Clear removes all items from the grid. +func (g *Grid) Clear() *Grid { + g.items = nil + return g +} + +// SetOffset sets the number of rows and columns which are skipped before +// drawing the first grid cell in the top-left corner. As the grid will never +// completely move off the screen, these values may be adjusted the next time +// the grid is drawn. 
The actual position of the grid may also be adjusted such +// that contained primitives that have focus are visible. +func (g *Grid) SetOffset(rows, columns int) *Grid { + g.rowOffset, g.columnOffset = rows, columns + return g +} + +// GetOffset returns the current row and column offset (see SetOffset() for +// details). +func (g *Grid) GetOffset() (rows, columns int) { + return g.rowOffset, g.columnOffset +} + +// Focus is called when this primitive receives focus. +func (g *Grid) Focus(delegate func(p Primitive)) { + for _, item := range g.items { + if item.Focus { + delegate(item.Item) + return + } + } + g.hasFocus = true +} + +// Blur is called when this primitive loses focus. +func (g *Grid) Blur() { + g.hasFocus = false +} + +// HasFocus returns whether or not this primitive has focus. +func (g *Grid) HasFocus() bool { + for _, item := range g.items { + if item.visible && item.Item.GetFocusable().HasFocus() { + return true + } + } + return g.hasFocus +} + +// InputHandler returns the handler for this primitive. +func (g *Grid) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return g.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + switch event.Key() { + case tcell.KeyRune: + switch event.Rune() { + case 'g': + g.rowOffset, g.columnOffset = 0, 0 + case 'G': + g.rowOffset = math.MaxInt32 + case 'j': + g.rowOffset++ + case 'k': + g.rowOffset-- + case 'h': + g.columnOffset-- + case 'l': + g.columnOffset++ + } + case tcell.KeyHome: + g.rowOffset, g.columnOffset = 0, 0 + case tcell.KeyEnd: + g.rowOffset = math.MaxInt32 + case tcell.KeyUp: + g.rowOffset-- + case tcell.KeyDown: + g.rowOffset++ + case tcell.KeyLeft: + g.columnOffset-- + case tcell.KeyRight: + g.columnOffset++ + } + }) +} + +// Draw draws this primitive onto the screen. 
+func (g *Grid) Draw(screen tcell.Screen) { + g.Box.Draw(screen) + x, y, width, height := g.GetInnerRect() + screenWidth, screenHeight := screen.Size() + + // Make a list of items which apply. + items := make(map[Primitive]*gridItem) + for _, item := range g.items { + item.visible = false + if item.Width <= 0 || item.Height <= 0 || width < item.MinGridWidth || height < item.MinGridHeight { + continue + } + previousItem, ok := items[item.Item] + if ok && item.Width < previousItem.Width && item.Height < previousItem.Height { + continue + } + items[item.Item] = item + } + + // How many rows and columns do we have? + rows := len(g.rows) + columns := len(g.columns) + for _, item := range items { + rowEnd := item.Row + item.Height + if rowEnd > rows { + rows = rowEnd + } + columnEnd := item.Column + item.Width + if columnEnd > columns { + columns = columnEnd + } + } + if rows == 0 || columns == 0 { + return // No content. + } + + // Where are they located? + rowPos := make([]int, rows) + rowHeight := make([]int, rows) + columnPos := make([]int, columns) + columnWidth := make([]int, columns) + + // How much space do we distribute? 
+	remainingWidth := width
+	remainingHeight := height
+	proportionalWidth := 0
+	proportionalHeight := 0
+	for index, row := range g.rows {
+		if row > 0 {
+			if row < g.minHeight {
+				row = g.minHeight
+			}
+			remainingHeight -= row
+			rowHeight[index] = row
+		} else if row == 0 {
+			proportionalHeight++
+		} else {
+			proportionalHeight += -row
+		}
+	}
+	for index, column := range g.columns {
+		if column > 0 {
+			if column < g.minWidth {
+				column = g.minWidth
+			}
+			remainingWidth -= column
+			columnWidth[index] = column
+		} else if column == 0 {
+			proportionalWidth++
+		} else {
+			proportionalWidth += -column
+		}
+	}
+	if g.borders {
+		remainingHeight -= rows + 1
+		remainingWidth -= columns + 1
+	} else {
+		remainingHeight -= (rows - 1) * g.gapRows
+		remainingWidth -= (columns - 1) * g.gapColumns
+	}
+	if rows > len(g.rows) {
+		proportionalHeight += rows - len(g.rows)
+	}
+	if columns > len(g.columns) {
+		proportionalWidth += columns - len(g.columns)
+	}
+
+	// Distribute proportional rows/columns.
+	gridWidth := 0
+	gridHeight := 0
+	for index := 0; index < rows; index++ {
+		row := 0
+		if index < len(g.rows) {
+			row = g.rows[index]
+		}
+		if row > 0 {
+			if row < g.minHeight {
+				row = g.minHeight
+			}
+			gridHeight += row
+			continue // Not proportional. We already know the height.
+		} else if row == 0 {
+			row = 1
+		} else {
+			row = -row
+		}
+		rowAbs := row * remainingHeight / proportionalHeight
+		remainingHeight -= rowAbs
+		proportionalHeight -= row
+		if rowAbs < g.minHeight {
+			rowAbs = g.minHeight
+		}
+		rowHeight[index] = rowAbs
+		gridHeight += rowAbs
+	}
+	for index := 0; index < columns; index++ {
+		column := 0
+		if index < len(g.columns) {
+			column = g.columns[index]
+		}
+		if column > 0 {
+			if column < g.minWidth {
+				column = g.minWidth
+			}
+			gridWidth += column
+			continue // Not proportional. We already know the width.
+ } else if column == 0 { + column = 1 + } else { + column = -column + } + columnAbs := column * remainingWidth / proportionalWidth + remainingWidth -= columnAbs + proportionalWidth -= column + if columnAbs < g.minWidth { + columnAbs = g.minWidth + } + columnWidth[index] = columnAbs + gridWidth += columnAbs + } + if g.borders { + gridHeight += rows + 1 + gridWidth += columns + 1 + } else { + gridHeight += (rows - 1) * g.gapRows + gridWidth += (columns - 1) * g.gapColumns + } + + // Calculate row/column positions. + columnX, rowY := x, y + if g.borders { + columnX++ + rowY++ + } + for index, row := range rowHeight { + rowPos[index] = rowY + gap := g.gapRows + if g.borders { + gap = 1 + } + rowY += row + gap + } + for index, column := range columnWidth { + columnPos[index] = columnX + gap := g.gapColumns + if g.borders { + gap = 1 + } + columnX += column + gap + } + + // Calculate primitive positions. + var focus *gridItem // The item which has focus. + for primitive, item := range items { + px := columnPos[item.Column] + py := rowPos[item.Row] + var pw, ph int + for index := 0; index < item.Height; index++ { + ph += rowHeight[item.Row+index] + } + for index := 0; index < item.Width; index++ { + pw += columnWidth[item.Column+index] + } + if g.borders { + pw += item.Width - 1 + ph += item.Height - 1 + } else { + pw += (item.Width - 1) * g.gapColumns + ph += (item.Height - 1) * g.gapRows + } + item.x, item.y, item.w, item.h = px, py, pw, ph + item.visible = true + if primitive.GetFocusable().HasFocus() { + focus = item + } + } + + // Calculate screen offsets. + var offsetX, offsetY, add int + if g.rowOffset < 0 { + g.rowOffset = 0 + } + if g.columnOffset < 0 { + g.columnOffset = 0 + } + if g.borders { + add = 1 + } + for row := 0; row < rows-1; row++ { + remainingHeight := gridHeight - offsetY + if focus != nil && focus.y-add <= offsetY || // Don't let the focused item move out of screen. 
+ row >= g.rowOffset && (focus == nil || focus != nil && focus.y-offsetY < height) || // We've reached the requested offset. + remainingHeight <= height { // We have enough space to show the rest. + if row > 0 { + if focus != nil && focus.y+focus.h+add-offsetY > height { + offsetY += focus.y + focus.h + add - offsetY - height + } + if remainingHeight < height { + offsetY = gridHeight - height + } + } + g.rowOffset = row + break + } + offsetY = rowPos[row+1] - add + } + for column := 0; column < columns-1; column++ { + remainingWidth := gridWidth - offsetX + if focus != nil && focus.x-add <= offsetX || // Don't let the focused item move out of screen. + column >= g.columnOffset && (focus == nil || focus != nil && focus.x-offsetX < width) || // We've reached the requested offset. + remainingWidth <= width { // We have enough space to show the rest. + if column > 0 { + if focus != nil && focus.x+focus.w+add-offsetX > width { + offsetX += focus.x + focus.w + add - offsetX - width + } else if remainingWidth < width { + offsetX = gridWidth - width + } + } + g.columnOffset = column + break + } + offsetX = columnPos[column+1] - add + } + + // Draw primitives and borders. + for primitive, item := range items { + // Final primitive position. + if !item.visible { + continue + } + item.x -= offsetX + item.y -= offsetY + if item.x+item.w > x+width { + item.w = width - item.x + } + if item.y+item.h > y+height { + item.h = height - item.y + } + if item.x < 0 { + item.w += item.x + item.x = 0 + } + if item.y < 0 { + item.h += item.y + item.y = 0 + } + if item.w <= 0 || item.h <= 0 { + item.visible = false + continue + } + primitive.SetRect(item.x, item.y, item.w, item.h) + + // Draw primitive. + if item == focus { + defer primitive.Draw(screen) + } else { + primitive.Draw(screen) + } + + // Draw border around primitive. + if g.borders { + for bx := item.x; bx < item.x+item.w; bx++ { // Top/bottom lines. 
+ if bx < 0 || bx >= screenWidth { + continue + } + by := item.y - 1 + if by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.Horizontal, g.bordersColor) + } + by = item.y + item.h + if by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.Horizontal, g.bordersColor) + } + } + for by := item.y; by < item.y+item.h; by++ { // Left/right lines. + if by < 0 || by >= screenHeight { + continue + } + bx := item.x - 1 + if bx >= 0 && bx < screenWidth { + PrintJoinedSemigraphics(screen, bx, by, Borders.Vertical, g.bordersColor) + } + bx = item.x + item.w + if bx >= 0 && bx < screenWidth { + PrintJoinedSemigraphics(screen, bx, by, Borders.Vertical, g.bordersColor) + } + } + bx, by := item.x-1, item.y-1 // Top-left corner. + if bx >= 0 && bx < screenWidth && by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.TopLeft, g.bordersColor) + } + bx, by = item.x+item.w, item.y-1 // Top-right corner. + if bx >= 0 && bx < screenWidth && by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.TopRight, g.bordersColor) + } + bx, by = item.x-1, item.y+item.h // Bottom-left corner. + if bx >= 0 && bx < screenWidth && by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.BottomLeft, g.bordersColor) + } + bx, by = item.x+item.w, item.y+item.h // Bottom-right corner. 
+ if bx >= 0 && bx < screenWidth && by >= 0 && by < screenHeight { + PrintJoinedSemigraphics(screen, bx, by, Borders.BottomRight, g.bordersColor) + } + } + } +} diff --git a/vendor/github.com/rivo/tview/inputfield.go b/vendor/github.com/rivo/tview/inputfield.go new file mode 100644 index 00000000000..856cf3b82c2 --- /dev/null +++ b/vendor/github.com/rivo/tview/inputfield.go @@ -0,0 +1,445 @@ +package tview + +import ( + "math" + "regexp" + "strings" + "unicode/utf8" + + "github.com/gdamore/tcell" + runewidth "github.com/mattn/go-runewidth" +) + +// InputField is a one-line box (three lines if there is a title) where the +// user can enter text. Use SetAcceptanceFunc() to accept or reject input, +// SetChangedFunc() to listen for changes, and SetMaskCharacter() to hide input +// from onlookers (e.g. for password input). +// +// The following keys can be used for navigation and editing: +// +// - Left arrow: Move left by one character. +// - Right arrow: Move right by one character. +// - Home, Ctrl-A, Alt-a: Move to the beginning of the line. +// - End, Ctrl-E, Alt-e: Move to the end of the line. +// - Alt-left, Alt-b: Move left by one word. +// - Alt-right, Alt-f: Move right by one word. +// - Backspace: Delete the character before the cursor. +// - Delete: Delete the character after the cursor. +// - Ctrl-K: Delete from the cursor to the end of the line. +// - Ctrl-W: Delete the last word before the cursor. +// - Ctrl-U: Delete the entire line. +// +// See https://github.com/rivo/tview/wiki/InputField for an example. +type InputField struct { + *Box + + // The text that was entered. + text string + + // The text to be displayed before the input area. + label string + + // The text to be displayed in the input area when "text" is empty. + placeholder string + + // The label color. + labelColor tcell.Color + + // The background color of the input area. + fieldBackgroundColor tcell.Color + + // The text color of the input area. 
+ fieldTextColor tcell.Color + + // The text color of the placeholder. + placeholderTextColor tcell.Color + + // The screen width of the label area. A value of 0 means use the width of + // the label text. + labelWidth int + + // The screen width of the input area. A value of 0 means extend as much as + // possible. + fieldWidth int + + // A character to mask entered text (useful for password fields). A value of 0 + // disables masking. + maskCharacter rune + + // The cursor position as a byte index into the text string. + cursorPos int + + // The number of bytes of the text string skipped ahead while drawing. + offset int + + // An optional function which may reject the last character that was entered. + accept func(text string, ch rune) bool + + // An optional function which is called when the input has changed. + changed func(text string) + + // An optional function which is called when the user indicated that they + // are done entering text. The key which was pressed is provided (tab, + // shift-tab, enter, or escape). + done func(tcell.Key) + + // A callback function set by the Form class and called when the user leaves + // this form item. + finished func(tcell.Key) +} + +// NewInputField returns a new input field. +func NewInputField() *InputField { + return &InputField{ + Box: NewBox(), + labelColor: Styles.SecondaryTextColor, + fieldBackgroundColor: Styles.ContrastBackgroundColor, + fieldTextColor: Styles.PrimaryTextColor, + placeholderTextColor: Styles.ContrastSecondaryTextColor, + } +} + +// SetText sets the current text of the input field. +func (i *InputField) SetText(text string) *InputField { + i.text = text + i.cursorPos = len(text) + if i.changed != nil { + i.changed(text) + } + return i +} + +// GetText returns the current text of the input field. +func (i *InputField) GetText() string { + return i.text +} + +// SetLabel sets the text to be displayed before the input area. 
+func (i *InputField) SetLabel(label string) *InputField { + i.label = label + return i +} + +// GetLabel returns the text to be displayed before the input area. +func (i *InputField) GetLabel() string { + return i.label +} + +// SetLabelWidth sets the screen width of the label. A value of 0 will cause the +// primitive to use the width of the label string. +func (i *InputField) SetLabelWidth(width int) *InputField { + i.labelWidth = width + return i +} + +// SetPlaceholder sets the text to be displayed when the input text is empty. +func (i *InputField) SetPlaceholder(text string) *InputField { + i.placeholder = text + return i +} + +// SetLabelColor sets the color of the label. +func (i *InputField) SetLabelColor(color tcell.Color) *InputField { + i.labelColor = color + return i +} + +// SetFieldBackgroundColor sets the background color of the input area. +func (i *InputField) SetFieldBackgroundColor(color tcell.Color) *InputField { + i.fieldBackgroundColor = color + return i +} + +// SetFieldTextColor sets the text color of the input area. +func (i *InputField) SetFieldTextColor(color tcell.Color) *InputField { + i.fieldTextColor = color + return i +} + +// SetPlaceholderTextColor sets the text color of placeholder text. +func (i *InputField) SetPlaceholderTextColor(color tcell.Color) *InputField { + i.placeholderTextColor = color + return i +} + +// SetFormAttributes sets attributes shared by all form items. +func (i *InputField) SetFormAttributes(labelWidth int, labelColor, bgColor, fieldTextColor, fieldBgColor tcell.Color) FormItem { + i.labelWidth = labelWidth + i.labelColor = labelColor + i.backgroundColor = bgColor + i.fieldTextColor = fieldTextColor + i.fieldBackgroundColor = fieldBgColor + return i +} + +// SetFieldWidth sets the screen width of the input area. A value of 0 means +// extend as much as possible. 
+func (i *InputField) SetFieldWidth(width int) *InputField { + i.fieldWidth = width + return i +} + +// GetFieldWidth returns this primitive's field width. +func (i *InputField) GetFieldWidth() int { + return i.fieldWidth +} + +// SetMaskCharacter sets a character that masks user input on a screen. A value +// of 0 disables masking. +func (i *InputField) SetMaskCharacter(mask rune) *InputField { + i.maskCharacter = mask + return i +} + +// SetAcceptanceFunc sets a handler which may reject the last character that was +// entered (by returning false). +// +// This package defines a number of variables prefixed with InputField which may +// be used for common input (e.g. numbers, maximum text length). +func (i *InputField) SetAcceptanceFunc(handler func(textToCheck string, lastChar rune) bool) *InputField { + i.accept = handler + return i +} + +// SetChangedFunc sets a handler which is called whenever the text of the input +// field has changed. It receives the current text (after the change). +func (i *InputField) SetChangedFunc(handler func(text string)) *InputField { + i.changed = handler + return i +} + +// SetDoneFunc sets a handler which is called when the user is done entering +// text. The callback function is provided with the key that was pressed, which +// is one of the following: +// +// - KeyEnter: Done entering text. +// - KeyEscape: Abort text input. +// - KeyTab: Move to the next field. +// - KeyBacktab: Move to the previous field. +func (i *InputField) SetDoneFunc(handler func(key tcell.Key)) *InputField { + i.done = handler + return i +} + +// SetFinishedFunc sets a callback invoked when the user leaves this form item. +func (i *InputField) SetFinishedFunc(handler func(key tcell.Key)) FormItem { + i.finished = handler + return i +} + +// Draw draws this primitive onto the screen. 
+func (i *InputField) Draw(screen tcell.Screen) { + i.Box.Draw(screen) + + // Prepare + x, y, width, height := i.GetInnerRect() + rightLimit := x + width + if height < 1 || rightLimit <= x { + return + } + + // Draw label. + if i.labelWidth > 0 { + labelWidth := i.labelWidth + if labelWidth > rightLimit-x { + labelWidth = rightLimit - x + } + Print(screen, i.label, x, y, labelWidth, AlignLeft, i.labelColor) + x += labelWidth + } else { + _, drawnWidth := Print(screen, i.label, x, y, rightLimit-x, AlignLeft, i.labelColor) + x += drawnWidth + } + + // Draw input area. + fieldWidth := i.fieldWidth + if fieldWidth == 0 { + fieldWidth = math.MaxInt32 + } + if rightLimit-x < fieldWidth { + fieldWidth = rightLimit - x + } + fieldStyle := tcell.StyleDefault.Background(i.fieldBackgroundColor) + for index := 0; index < fieldWidth; index++ { + screen.SetContent(x+index, y, ' ', nil, fieldStyle) + } + + // Text. + var cursorScreenPos int + text := i.text + if text == "" && i.placeholder != "" { + // Draw placeholder text. + Print(screen, Escape(i.placeholder), x, y, fieldWidth, AlignLeft, i.placeholderTextColor) + i.offset = 0 + } else { + // Draw entered text. + if i.maskCharacter > 0 { + text = strings.Repeat(string(i.maskCharacter), utf8.RuneCountInString(i.text)) + } + stringWidth := runewidth.StringWidth(text) + if fieldWidth >= stringWidth { + // We have enough space for the full text. + Print(screen, Escape(text), x, y, fieldWidth, AlignLeft, i.fieldTextColor) + i.offset = 0 + iterateString(text, func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + if textPos >= i.cursorPos { + return true + } + cursorScreenPos += screenWidth + return false + }) + } else { + // The text doesn't fit. Where is the cursor? + if i.cursorPos < 0 { + i.cursorPos = 0 + } else if i.cursorPos > len(text) { + i.cursorPos = len(text) + } + // Shift the text so the cursor is inside the field. 
+ var shiftLeft int + if i.offset > i.cursorPos { + i.offset = i.cursorPos + } else if subWidth := runewidth.StringWidth(text[i.offset:i.cursorPos]); subWidth > fieldWidth-1 { + shiftLeft = subWidth - fieldWidth + 1 + } + currentOffset := i.offset + iterateString(text, func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + if textPos >= currentOffset { + if shiftLeft > 0 { + i.offset = textPos + textWidth + shiftLeft -= screenWidth + } else { + if textPos+textWidth > i.cursorPos { + return true + } + cursorScreenPos += screenWidth + } + } + return false + }) + Print(screen, Escape(text[i.offset:]), x, y, fieldWidth, AlignLeft, i.fieldTextColor) + } + } + + // Set cursor. + if i.focus.HasFocus() { + screen.ShowCursor(x+cursorScreenPos, y) + } +} + +// InputHandler returns the handler for this primitive. +func (i *InputField) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return i.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + // Trigger changed events. + currentText := i.text + defer func() { + if i.text != currentText && i.changed != nil { + i.changed(i.text) + } + }() + + // Movement functions. + home := func() { i.cursorPos = 0 } + end := func() { i.cursorPos = len(i.text) } + moveLeft := func() { + iterateStringReverse(i.text[:i.cursorPos], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + i.cursorPos -= textWidth + return true + }) + } + moveRight := func() { + iterateString(i.text[i.cursorPos:], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + i.cursorPos += textWidth + return true + }) + } + moveWordLeft := func() { + i.cursorPos = len(regexp.MustCompile(`\S+\s*$`).ReplaceAllString(i.text[:i.cursorPos], "")) + } + moveWordRight := func() { + i.cursorPos = len(i.text) - len(regexp.MustCompile(`^\s*\S+\s*`).ReplaceAllString(i.text[i.cursorPos:], "")) + } + + // Add character function. 
Returns whether or not the rune character is + // accepted. + add := func(r rune) bool { + newText := i.text[:i.cursorPos] + string(r) + i.text[i.cursorPos:] + if i.accept != nil && !i.accept(newText, r) { + return false + } + i.text = newText + i.cursorPos += len(string(r)) + return true + } + + // Process key event. + switch key := event.Key(); key { + case tcell.KeyRune: // Regular character. + if event.Modifiers()&tcell.ModAlt > 0 { + // We accept some Alt- key combinations. + switch event.Rune() { + case 'a': // Home. + home() + case 'e': // End. + end() + case 'b': // Move word left. + moveWordLeft() + case 'f': // Move word right. + moveWordRight() + } + } else { + // Other keys are simply accepted as regular characters. + if !add(event.Rune()) { + break + } + } + case tcell.KeyCtrlU: // Delete all. + i.text = "" + i.cursorPos = 0 + case tcell.KeyCtrlK: // Delete until the end of the line. + i.text = i.text[:i.cursorPos] + case tcell.KeyCtrlW: // Delete last word. + lastWord := regexp.MustCompile(`\S+\s*$`) + newText := lastWord.ReplaceAllString(i.text[:i.cursorPos], "") + i.text[i.cursorPos:] + i.cursorPos -= len(i.text) - len(newText) + i.text = newText + case tcell.KeyBackspace, tcell.KeyBackspace2: // Delete character before the cursor. + iterateStringReverse(i.text[:i.cursorPos], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + i.text = i.text[:textPos] + i.text[textPos+textWidth:] + i.cursorPos -= textWidth + return true + }) + if i.offset >= i.cursorPos { + i.offset = 0 + } + case tcell.KeyDelete: // Delete character after the cursor. 
+ iterateString(i.text[i.cursorPos:], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + i.text = i.text[:i.cursorPos] + i.text[i.cursorPos+textWidth:] + return true + }) + case tcell.KeyLeft: + if event.Modifiers()&tcell.ModAlt > 0 { + moveWordLeft() + } else { + moveLeft() + } + case tcell.KeyRight: + if event.Modifiers()&tcell.ModAlt > 0 { + moveWordRight() + } else { + moveRight() + } + case tcell.KeyHome, tcell.KeyCtrlA: + home() + case tcell.KeyEnd, tcell.KeyCtrlE: + end() + case tcell.KeyEnter, tcell.KeyTab, tcell.KeyBacktab, tcell.KeyEscape: // We're done. + if i.done != nil { + i.done(key) + } + if i.finished != nil { + i.finished(key) + } + } + }) +} diff --git a/vendor/github.com/rivo/tview/list.go b/vendor/github.com/rivo/tview/list.go new file mode 100644 index 00000000000..3990544611d --- /dev/null +++ b/vendor/github.com/rivo/tview/list.go @@ -0,0 +1,510 @@ +package tview + +import ( + "fmt" + "strings" + + "github.com/gdamore/tcell" +) + +// listItem represents one item in a List. +type listItem struct { + MainText string // The main text of the list item. + SecondaryText string // A secondary text to be shown underneath the main text. + Shortcut rune // The key to select the list item directly, 0 if there is no shortcut. + Selected func() // The optional function which is called when the item is selected. +} + +// List displays rows of items, each of which can be selected. +// +// See https://github.com/rivo/tview/wiki/List for an example. +type List struct { + *Box + + // The items of the list. + items []*listItem + + // The index of the currently selected item. + currentItem int + + // Whether or not to show the secondary item texts. + showSecondaryText bool + + // The item main text color. + mainTextColor tcell.Color + + // The item secondary text color. + secondaryTextColor tcell.Color + + // The item shortcut text color. + shortcutColor tcell.Color + + // The text color for selected items. 
+ selectedTextColor tcell.Color + + // The background color for selected items. + selectedBackgroundColor tcell.Color + + // If true, the selection is only shown when the list has focus. + selectedFocusOnly bool + + // The number of list items skipped at the top before the first item is drawn. + offset int + + // An optional function which is called when the user has navigated to a list + // item. + changed func(index int, mainText, secondaryText string, shortcut rune) + + // An optional function which is called when a list item was selected. This + // function will be called even if the list item defines its own callback. + selected func(index int, mainText, secondaryText string, shortcut rune) + + // An optional function which is called when the user presses the Escape key. + done func() +} + +// NewList returns a new form. +func NewList() *List { + return &List{ + Box: NewBox(), + showSecondaryText: true, + mainTextColor: Styles.PrimaryTextColor, + secondaryTextColor: Styles.TertiaryTextColor, + shortcutColor: Styles.SecondaryTextColor, + selectedTextColor: Styles.PrimitiveBackgroundColor, + selectedBackgroundColor: Styles.PrimaryTextColor, + } +} + +// SetCurrentItem sets the currently selected item by its index, starting at 0 +// for the first item. If a negative index is provided, items are referred to +// from the back (-1 = last item, -2 = second-to-last item, and so on). Out of +// range indices are clamped to the beginning/end. +// +// Calling this function triggers a "changed" event if the selection changes. 
+func (l *List) SetCurrentItem(index int) *List {
+	if index < 0 {
+		index = len(l.items) + index
+	}
+	if index >= len(l.items) {
+		index = len(l.items) - 1
+	}
+	if index < 0 {
+		index = 0
+	}
+	// Remember the previous selection so we can fire "changed" only when the
+	// selection actually moved (comparing after the assignment was always false).
+	previousCurrentItem := l.currentItem
+	l.currentItem = index
+	if index != previousCurrentItem && l.changed != nil {
+		item := l.items[l.currentItem]
+		l.changed(l.currentItem, item.MainText, item.SecondaryText, item.Shortcut)
+	}
+	return l
+}
+
+// GetCurrentItem returns the index of the currently selected list item,
+// starting at 0 for the first item.
+func (l *List) GetCurrentItem() int {
+	return l.currentItem
+}
+
+// RemoveItem removes the item with the given index (starting at 0) from the
+// list. If a negative index is provided, items are referred to from the back
+// (-1 = last item, -2 = second-to-last item, and so on). Out of range indices
+// are clamped to the beginning/end, i.e. unless the list is empty, an item is
+// always removed.
+//
+// The currently selected item is shifted accordingly. If it is the one that is
+// removed, a "changed" event is fired.
+func (l *List) RemoveItem(index int) *List {
+	if len(l.items) == 0 {
+		return l
+	}
+
+	// Adjust index.
+	if index < 0 {
+		index = len(l.items) + index
+	}
+	if index >= len(l.items) {
+		index = len(l.items) - 1
+	}
+	if index < 0 {
+		index = 0
+	}
+
+	// Remove item.
+	l.items = append(l.items[:index], l.items[index+1:]...)
+
+	// If there is nothing left, we're done.
+	if len(l.items) == 0 {
+		return l
+	}
+
+	// Shift current item, but never below zero (removing item 0 while it was
+	// selected previously left currentItem at -1 and panicked below).
+	previousCurrentItem := l.currentItem
+	if l.currentItem >= index && l.currentItem > 0 {
+		l.currentItem--
+	}
+
+	// Fire "changed" event for removed items.
+	if previousCurrentItem == index && l.changed != nil {
+		item := l.items[l.currentItem]
+		l.changed(l.currentItem, item.MainText, item.SecondaryText, item.Shortcut)
+	}
+
+	return l
+}
+
+// SetMainTextColor sets the color of the items' main text.
func (l *List) SetMainTextColor(color tcell.Color) *List {
	l.mainTextColor = color
	return l
}

// SetSecondaryTextColor sets the color of the items' secondary text.
func (l *List) SetSecondaryTextColor(color tcell.Color) *List {
	l.secondaryTextColor = color
	return l
}

// SetShortcutColor sets the color of the items' shortcut.
func (l *List) SetShortcutColor(color tcell.Color) *List {
	l.shortcutColor = color
	return l
}

// SetSelectedTextColor sets the text color of selected items.
func (l *List) SetSelectedTextColor(color tcell.Color) *List {
	l.selectedTextColor = color
	return l
}

// SetSelectedBackgroundColor sets the background color of selected items.
func (l *List) SetSelectedBackgroundColor(color tcell.Color) *List {
	l.selectedBackgroundColor = color
	return l
}

// SetSelectedFocusOnly sets a flag which determines when the currently selected
// list item is highlighted. If set to true, selected items are only highlighted
// when the list has focus. If set to false, they are always highlighted.
func (l *List) SetSelectedFocusOnly(focusOnly bool) *List {
	l.selectedFocusOnly = focusOnly
	return l
}

// ShowSecondaryText determines whether or not to show secondary item texts.
func (l *List) ShowSecondaryText(show bool) *List {
	l.showSecondaryText = show
	return l
}

// SetChangedFunc sets the function which is called when the user navigates to
// a list item. The function receives the item's index in the list of items
// (starting with 0), its main text, secondary text, and its shortcut rune.
//
// This function is also called when the first item is added or when
// SetCurrentItem() is called. Providing nil removes the handler.
func (l *List) SetChangedFunc(handler func(index int, mainText string, secondaryText string, shortcut rune)) *List {
	l.changed = handler
	return l
}

// SetSelectedFunc sets the function which is called when the user selects a
// list item by pressing Enter on the current selection. The function receives
// the item's index in the list of items (starting with 0), its main text,
// secondary text, and its shortcut rune. Providing nil removes the handler.
func (l *List) SetSelectedFunc(handler func(int, string, string, rune)) *List {
	l.selected = handler
	return l
}

// SetDoneFunc sets a function which is called when the user presses the Escape
// key.
func (l *List) SetDoneFunc(handler func()) *List {
	l.done = handler
	return l
}

// AddItem calls InsertItem() with an index of -1, i.e. the new item is
// appended at the end of the list.
func (l *List) AddItem(mainText, secondaryText string, shortcut rune, selected func()) *List {
	l.InsertItem(-1, mainText, secondaryText, shortcut, selected)
	return l
}

// InsertItem adds a new item to the list at the specified index. An index of 0
// will insert the item at the beginning, an index of 1 before the second item,
// and so on. An index of GetItemCount() or higher will insert the item at the
// end of the list. Negative indices are also allowed: An index of -1 will
// insert the item at the end of the list, an index of -2 before the last item,
// and so on. An index of -GetItemCount()-1 or lower will insert the item at the
// beginning.
//
// An item has a main text which will be highlighted when selected. It also has
// a secondary text which is shown underneath the main text (if it is set to
// visible) but which may remain empty.
//
// The shortcut is a key binding. If the specified rune is entered, the item
// is selected immediately. Set to 0 for no binding.
//
// The "selected" callback will be invoked when the user selects the item. You
// may provide nil if no such callback is needed or if all events are handled
// through the selected callback set with SetSelectedFunc().
//
// The currently selected item will shift its position accordingly. If the list
// was previously empty, a "changed" event is fired because the new item becomes
// selected.
func (l *List) InsertItem(index int, mainText, secondaryText string, shortcut rune, selected func()) *List {
	item := &listItem{
		MainText:      mainText,
		SecondaryText: secondaryText,
		Shortcut:      shortcut,
		Selected:      selected,
	}

	// Shift index to range. Negative indices count from the back; the +1
	// makes -1 mean "append after the last item".
	if index < 0 {
		index = len(l.items) + index + 1
	}
	if index < 0 {
		index = 0
	} else if index > len(l.items) {
		index = len(l.items)
	}

	// Shift current item so the selection stays on the same item after the
	// insertion.
	if l.currentItem < len(l.items) && l.currentItem >= index {
		l.currentItem++
	}

	// Insert item (make space for the new item, then shift and insert).
	l.items = append(l.items, nil)
	if index < len(l.items)-1 { // -1 because l.items has already grown by one item.
		copy(l.items[index+1:], l.items[index:])
	}
	l.items[index] = item

	// Fire a "change" event for the first item in the list: it has just
	// become the selected item.
	if len(l.items) == 1 && l.changed != nil {
		item := l.items[0]
		l.changed(0, item.MainText, item.SecondaryText, item.Shortcut)
	}

	return l
}

// GetItemCount returns the number of items in the list.
func (l *List) GetItemCount() int {
	return len(l.items)
}

// GetItemText returns an item's texts (main and secondary). Panics if the index
// is out of range.
func (l *List) GetItemText(index int) (main, secondary string) {
	return l.items[index].MainText, l.items[index].SecondaryText
}

// SetItemText sets an item's main and secondary text. Panics if the index is
// out of range.
func (l *List) SetItemText(index int, main, secondary string) *List {
	item := l.items[index]
	item.MainText = main
	item.SecondaryText = secondary
	return l
}

// FindItems searches the main and secondary texts for the given strings and
// returns a list of item indices in which those strings are found. One of the
// two search strings may be empty, it will then be ignored. Indices are always
// returned in ascending order.
//
// If mustContainBoth is set to true, mainSearch must be contained in the main
// text AND secondarySearch must be contained in the secondary text. If it is
// false, only one of the two search strings must be contained.
//
// Set ignoreCase to true for case-insensitive search.
func (l *List) FindItems(mainSearch, secondarySearch string, mustContainBoth, ignoreCase bool) (indices []int) {
	// Nothing to search for.
	if mainSearch == "" && secondarySearch == "" {
		return
	}

	if ignoreCase {
		mainSearch = strings.ToLower(mainSearch)
		secondarySearch = strings.ToLower(secondarySearch)
	}

	for index, item := range l.items {
		mainText := item.MainText
		secondaryText := item.SecondaryText
		if ignoreCase {
			mainText = strings.ToLower(mainText)
			secondaryText = strings.ToLower(secondaryText)
		}

		// strings.Contains() always returns true for a "" search. The empty
		// checks in the OR branch below compensate for that so an empty text
		// never counts as a match on its own.
		mainContained := strings.Contains(mainText, mainSearch)
		secondaryContained := strings.Contains(secondaryText, secondarySearch)
		if mustContainBoth && mainContained && secondaryContained ||
			!mustContainBoth && (mainText != "" && mainContained || secondaryText != "" && secondaryContained) {
			indices = append(indices, index)
		}
	}

	return
}

// Clear removes all items from the list.
func (l *List) Clear() *List {
	l.items = nil
	l.currentItem = 0
	return l
}

// Draw draws this primitive onto the screen.
func (l *List) Draw(screen tcell.Screen) {
	l.Box.Draw(screen)

	// Determine the dimensions.
	x, y, width, height := l.GetInnerRect()
	bottomLimit := y + height

	// Do we show any shortcuts? If so, reserve 4 columns on the left for the
	// "(x)" prefix.
	var showShortcuts bool
	for _, item := range l.items {
		if item.Shortcut != 0 {
			showShortcuts = true
			x += 4
			width -= 4
			break
		}
	}

	// Adjust offset to keep the current selection in view. When secondary
	// text is shown, each item occupies two rows, hence the factor of 2 in
	// the arithmetic below.
	if l.currentItem < l.offset {
		l.offset = l.currentItem
	} else if l.showSecondaryText {
		if 2*(l.currentItem-l.offset) >= height-1 {
			l.offset = (2*l.currentItem + 3 - height) / 2
		}
	} else {
		if l.currentItem-l.offset >= height {
			l.offset = l.currentItem + 1 - height
		}
	}

	// Draw the list items.
	for index, item := range l.items {
		// Items scrolled off the top are skipped.
		if index < l.offset {
			continue
		}

		if y >= bottomLimit {
			break
		}

		// Shortcuts.
		if showShortcuts && item.Shortcut != 0 {
			Print(screen, fmt.Sprintf("(%s)", string(item.Shortcut)), x-5, y, 4, AlignRight, l.shortcutColor)
		}

		// Main text.
		Print(screen, item.MainText, x, y, width, AlignLeft, l.mainTextColor)

		// Background color of selected text. Re-style the already printed
		// main text cell by cell, preserving foreground colors that differ
		// from the default main text color.
		if index == l.currentItem && (!l.selectedFocusOnly || l.HasFocus()) {
			textWidth := StringWidth(item.MainText)
			for bx := 0; bx < textWidth && bx < width; bx++ {
				m, c, style, _ := screen.GetContent(x+bx, y)
				fg, _, _ := style.Decompose()
				if fg == l.mainTextColor {
					fg = l.selectedTextColor
				}
				style = style.Background(l.selectedBackgroundColor).Foreground(fg)
				screen.SetContent(x+bx, y, m, c, style)
			}
		}

		y++

		if y >= bottomLimit {
			break
		}

		// Secondary text.
		if l.showSecondaryText {
			Print(screen, item.SecondaryText, x, y, width, AlignLeft, l.secondaryTextColor)
			y++
		}
	}
}

// InputHandler returns the handler for this primitive.
+func (l *List) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return l.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + previousItem := l.currentItem + + switch key := event.Key(); key { + case tcell.KeyTab, tcell.KeyDown, tcell.KeyRight: + l.currentItem++ + case tcell.KeyBacktab, tcell.KeyUp, tcell.KeyLeft: + l.currentItem-- + case tcell.KeyHome: + l.currentItem = 0 + case tcell.KeyEnd: + l.currentItem = len(l.items) - 1 + case tcell.KeyPgDn: + l.currentItem += 5 + case tcell.KeyPgUp: + l.currentItem -= 5 + case tcell.KeyEnter: + if l.currentItem >= 0 && l.currentItem < len(l.items) { + item := l.items[l.currentItem] + if item.Selected != nil { + item.Selected() + } + if l.selected != nil { + l.selected(l.currentItem, item.MainText, item.SecondaryText, item.Shortcut) + } + } + case tcell.KeyEscape: + if l.done != nil { + l.done() + } + case tcell.KeyRune: + ch := event.Rune() + if ch != ' ' { + // It's not a space bar. Is it a shortcut? + var found bool + for index, item := range l.items { + if item.Shortcut == ch { + // We have a shortcut. 
+ found = true + l.currentItem = index + break + } + } + if !found { + break + } + } + item := l.items[l.currentItem] + if item.Selected != nil { + item.Selected() + } + if l.selected != nil { + l.selected(l.currentItem, item.MainText, item.SecondaryText, item.Shortcut) + } + } + + if l.currentItem < 0 { + l.currentItem = len(l.items) - 1 + } else if l.currentItem >= len(l.items) { + l.currentItem = 0 + } + + if l.currentItem != previousItem && l.currentItem < len(l.items) && l.changed != nil { + item := l.items[l.currentItem] + l.changed(l.currentItem, item.MainText, item.SecondaryText, item.Shortcut) + } + }) +} diff --git a/vendor/github.com/rivo/tview/modal.go b/vendor/github.com/rivo/tview/modal.go new file mode 100644 index 00000000000..c388f222609 --- /dev/null +++ b/vendor/github.com/rivo/tview/modal.go @@ -0,0 +1,146 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Modal is a centered message window used to inform the user or prompt them +// for an immediate decision. It needs to have at least one button (added via +// AddButtons()) or it will never disappear. +// +// See https://github.com/rivo/tview/wiki/Modal for an example. +type Modal struct { + *Box + + // The framed embedded in the modal. + frame *Frame + + // The form embedded in the modal's frame. + form *Form + + // The message text (original, not word-wrapped). + text string + + // The text color. + textColor tcell.Color + + // The optional callback for when the user clicked one of the buttons. It + // receives the index of the clicked button and the button's label. + done func(buttonIndex int, buttonLabel string) +} + +// NewModal returns a new modal message window. +func NewModal() *Modal { + m := &Modal{ + Box: NewBox(), + textColor: Styles.PrimaryTextColor, + } + m.form = NewForm(). + SetButtonsAlign(AlignCenter). + SetButtonBackgroundColor(Styles.PrimitiveBackgroundColor). 
+ SetButtonTextColor(Styles.PrimaryTextColor) + m.form.SetBackgroundColor(Styles.ContrastBackgroundColor).SetBorderPadding(0, 0, 0, 0) + m.form.SetCancelFunc(func() { + if m.done != nil { + m.done(-1, "") + } + }) + m.frame = NewFrame(m.form).SetBorders(0, 0, 1, 0, 0, 0) + m.frame.SetBorder(true). + SetBackgroundColor(Styles.ContrastBackgroundColor). + SetBorderPadding(1, 1, 1, 1) + m.focus = m + return m +} + +// SetTextColor sets the color of the message text. +func (m *Modal) SetTextColor(color tcell.Color) *Modal { + m.textColor = color + return m +} + +// SetDoneFunc sets a handler which is called when one of the buttons was +// pressed. It receives the index of the button as well as its label text. The +// handler is also called when the user presses the Escape key. The index will +// then be negative and the label text an emptry string. +func (m *Modal) SetDoneFunc(handler func(buttonIndex int, buttonLabel string)) *Modal { + m.done = handler + return m +} + +// SetText sets the message text of the window. The text may contain line +// breaks. Note that words are wrapped, too, based on the final size of the +// window. +func (m *Modal) SetText(text string) *Modal { + m.text = text + return m +} + +// AddButtons adds buttons to the window. There must be at least one button and +// a "done" handler so the window can be closed again. 
+func (m *Modal) AddButtons(labels []string) *Modal { + for index, label := range labels { + func(i int, l string) { + m.form.AddButton(label, func() { + if m.done != nil { + m.done(i, l) + } + }) + button := m.form.GetButton(m.form.GetButtonCount() - 1) + button.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + switch event.Key() { + case tcell.KeyDown, tcell.KeyRight: + return tcell.NewEventKey(tcell.KeyTab, 0, tcell.ModNone) + case tcell.KeyUp, tcell.KeyLeft: + return tcell.NewEventKey(tcell.KeyBacktab, 0, tcell.ModNone) + } + return event + }) + }(index, label) + } + return m +} + +// Focus is called when this primitive receives focus. +func (m *Modal) Focus(delegate func(p Primitive)) { + delegate(m.form) +} + +// HasFocus returns whether or not this primitive has focus. +func (m *Modal) HasFocus() bool { + return m.form.HasFocus() +} + +// Draw draws this primitive onto the screen. +func (m *Modal) Draw(screen tcell.Screen) { + // Calculate the width of this modal. + buttonsWidth := 0 + for _, button := range m.form.buttons { + buttonsWidth += StringWidth(button.label) + 4 + 2 + } + buttonsWidth -= 2 + screenWidth, screenHeight := screen.Size() + width := screenWidth / 3 + if width < buttonsWidth { + width = buttonsWidth + } + // width is now without the box border. + + // Reset the text and find out how wide it is. + m.frame.Clear() + lines := WordWrap(m.text, width) + for _, line := range lines { + m.frame.AddText(line, true, AlignCenter, m.textColor) + } + + // Set the modal's position and size. + height := len(lines) + 6 + width += 4 + x := (screenWidth - width) / 2 + y := (screenHeight - height) / 2 + m.SetRect(x, y, width, height) + + // Draw the frame. 
+ m.frame.SetRect(x, y, width, height) + m.frame.Draw(screen) +} diff --git a/vendor/github.com/rivo/tview/pages.go b/vendor/github.com/rivo/tview/pages.go new file mode 100644 index 00000000000..9af7fe97f15 --- /dev/null +++ b/vendor/github.com/rivo/tview/pages.go @@ -0,0 +1,248 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// page represents one page of a Pages object. +type page struct { + Name string // The page's name. + Item Primitive // The page's primitive. + Resize bool // Whether or not to resize the page when it is drawn. + Visible bool // Whether or not this page is visible. +} + +// Pages is a container for other primitives often used as the application's +// root primitive. It allows to easily switch the visibility of the contained +// primitives. +// +// See https://github.com/rivo/tview/wiki/Pages for an example. +type Pages struct { + *Box + + // The contained pages. + pages []*page + + // We keep a reference to the function which allows us to set the focus to + // a newly visible page. + setFocus func(p Primitive) + + // An optional handler which is called whenever the visibility or the order of + // pages changes. + changed func() +} + +// NewPages returns a new Pages object. +func NewPages() *Pages { + p := &Pages{ + Box: NewBox(), + } + p.focus = p + return p +} + +// SetChangedFunc sets a handler which is called whenever the visibility or the +// order of any visible pages changes. This can be used to redraw the pages. +func (p *Pages) SetChangedFunc(handler func()) *Pages { + p.changed = handler + return p +} + +// AddPage adds a new page with the given name and primitive. If there was +// previously a page with the same name, it is overwritten. Leaving the name +// empty may cause conflicts in other functions. +// +// Visible pages will be drawn in the order they were added (unless that order +// was changed in one of the other functions). 
If "resize" is set to true, the
// primitive will be set to the size available to the Pages primitive whenever
// the pages are drawn.
func (p *Pages) AddPage(name string, item Primitive, resize, visible bool) *Pages {
	// Replace any existing page of the same name: remove the old entry first
	// so the new one is appended at the end (top) of the draw order.
	for index, pg := range p.pages {
		if pg.Name == name {
			p.pages = append(p.pages[:index], p.pages[index+1:]...)
			break
		}
	}
	p.pages = append(p.pages, &page{Item: item, Name: name, Resize: resize, Visible: visible})
	if p.changed != nil {
		p.changed()
	}
	// Re-delegate focus in case the new page is now the topmost visible one.
	if p.HasFocus() {
		p.Focus(p.setFocus)
	}
	return p
}

// AddAndSwitchToPage calls AddPage(), then SwitchToPage() on that newly added
// page.
func (p *Pages) AddAndSwitchToPage(name string, item Primitive, resize bool) *Pages {
	p.AddPage(name, item, resize, true)
	p.SwitchToPage(name)
	return p
}

// RemovePage removes the page with the given name.
func (p *Pages) RemovePage(name string) *Pages {
	hasFocus := p.HasFocus()
	for index, page := range p.pages {
		if page.Name == name {
			p.pages = append(p.pages[:index], p.pages[index+1:]...)
			// Only visible pages affect what is drawn, so only then do we
			// notify the "changed" handler.
			if page.Visible && p.changed != nil {
				p.changed()
			}
			break
		}
	}
	if hasFocus {
		p.Focus(p.setFocus)
	}
	return p
}

// HasPage returns true if a page with the given name exists in this object.
func (p *Pages) HasPage(name string) bool {
	for _, page := range p.pages {
		if page.Name == name {
			return true
		}
	}
	return false
}

// ShowPage sets a page's visibility to "true" (in addition to any other pages
// which are already visible).
func (p *Pages) ShowPage(name string) *Pages {
	for _, page := range p.pages {
		if page.Name == name {
			page.Visible = true
			if p.changed != nil {
				p.changed()
			}
			break
		}
	}
	if p.HasFocus() {
		p.Focus(p.setFocus)
	}
	return p
}

// HidePage sets a page's visibility to "false".
+func (p *Pages) HidePage(name string) *Pages { + for _, page := range p.pages { + if page.Name == name { + page.Visible = false + if p.changed != nil { + p.changed() + } + break + } + } + if p.HasFocus() { + p.Focus(p.setFocus) + } + return p +} + +// SwitchToPage sets a page's visibility to "true" and all other pages' +// visibility to "false". +func (p *Pages) SwitchToPage(name string) *Pages { + for _, page := range p.pages { + if page.Name == name { + page.Visible = true + } else { + page.Visible = false + } + } + if p.changed != nil { + p.changed() + } + if p.HasFocus() { + p.Focus(p.setFocus) + } + return p +} + +// SendToFront changes the order of the pages such that the page with the given +// name comes last, causing it to be drawn last with the next update (if +// visible). +func (p *Pages) SendToFront(name string) *Pages { + for index, page := range p.pages { + if page.Name == name { + if index < len(p.pages)-1 { + p.pages = append(append(p.pages[:index], p.pages[index+1:]...), page) + } + if page.Visible && p.changed != nil { + p.changed() + } + break + } + } + if p.HasFocus() { + p.Focus(p.setFocus) + } + return p +} + +// SendToBack changes the order of the pages such that the page with the given +// name comes first, causing it to be drawn first with the next update (if +// visible). +func (p *Pages) SendToBack(name string) *Pages { + for index, pg := range p.pages { + if pg.Name == name { + if index > 0 { + p.pages = append(append([]*page{pg}, p.pages[:index]...), p.pages[index+1:]...) + } + if pg.Visible && p.changed != nil { + p.changed() + } + break + } + } + if p.HasFocus() { + p.Focus(p.setFocus) + } + return p +} + +// HasFocus returns whether or not this primitive has focus. +func (p *Pages) HasFocus() bool { + for _, page := range p.pages { + if page.Item.GetFocusable().HasFocus() { + return true + } + } + return false +} + +// Focus is called by the application when the primitive receives focus. 
+func (p *Pages) Focus(delegate func(p Primitive)) { + if delegate == nil { + return // We cannot delegate so we cannot focus. + } + p.setFocus = delegate + var topItem Primitive + for _, page := range p.pages { + if page.Visible { + topItem = page.Item + } + } + if topItem != nil { + delegate(topItem) + } +} + +// Draw draws this primitive onto the screen. +func (p *Pages) Draw(screen tcell.Screen) { + p.Box.Draw(screen) + for _, page := range p.pages { + if !page.Visible { + continue + } + if page.Resize { + x, y, width, height := p.GetInnerRect() + page.Item.SetRect(x, y, width, height) + } + page.Item.Draw(screen) + } +} diff --git a/vendor/github.com/rivo/tview/primitive.go b/vendor/github.com/rivo/tview/primitive.go new file mode 100644 index 00000000000..88a9d466b18 --- /dev/null +++ b/vendor/github.com/rivo/tview/primitive.go @@ -0,0 +1,46 @@ +package tview + +import "github.com/gdamore/tcell" + +// Primitive is the top-most interface for all graphical primitives. +type Primitive interface { + // Draw draws this primitive onto the screen. Implementers can call the + // screen's ShowCursor() function but should only do so when they have focus. + // (They will need to keep track of this themselves.) + Draw(screen tcell.Screen) + + // GetRect returns the current position of the primitive, x, y, width, and + // height. + GetRect() (int, int, int, int) + + // SetRect sets a new position of the primitive. + SetRect(x, y, width, height int) + + // InputHandler returns a handler which receives key events when it has focus. + // It is called by the Application class. + // + // A value of nil may also be returned, in which case this primitive cannot + // receive focus and will not process any key events. + // + // The handler will receive the key event and a function that allows it to + // set the focus to a different primitive, so that future key events are sent + // to that primitive. 
+ // + // The Application's Draw() function will be called automatically after the + // handler returns. + // + // The Box class provides functionality to intercept keyboard input. If you + // subclass from Box, it is recommended that you wrap your handler using + // Box.WrapInputHandler() so you inherit that functionality. + InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) + + // Focus is called by the application when the primitive receives focus. + // Implementers may call delegate() to pass the focus on to another primitive. + Focus(delegate func(p Primitive)) + + // Blur is called by the application when the primitive loses focus. + Blur() + + // GetFocusable returns the item's Focusable. + GetFocusable() Focusable +} diff --git a/vendor/github.com/rivo/tview/semigraphics.go b/vendor/github.com/rivo/tview/semigraphics.go new file mode 100644 index 00000000000..3b66c5fc871 --- /dev/null +++ b/vendor/github.com/rivo/tview/semigraphics.go @@ -0,0 +1,296 @@ +package tview + +import "github.com/gdamore/tcell" + +// Semigraphics provides an easy way to access unicode characters for drawing. +// +// Named like the unicode characters, 'Semigraphics'-prefix used if unicode block +// isn't prefixed itself. 
+const ( + // Block: General Punctation U+2000-U+206F (http://unicode.org/charts/PDF/U2000.pdf) + SemigraphicsHorizontalEllipsis rune = '\u2026' // … + + // Block: Box Drawing U+2500-U+257F (http://unicode.org/charts/PDF/U2500.pdf) + BoxDrawingsLightHorizontal rune = '\u2500' // ─ + BoxDrawingsHeavyHorizontal rune = '\u2501' // ━ + BoxDrawingsLightVertical rune = '\u2502' // │ + BoxDrawingsHeavyVertical rune = '\u2503' // ┃ + BoxDrawingsLightTripleDashHorizontal rune = '\u2504' // ┄ + BoxDrawingsHeavyTripleDashHorizontal rune = '\u2505' // ┅ + BoxDrawingsLightTripleDashVertical rune = '\u2506' // ┆ + BoxDrawingsHeavyTripleDashVertical rune = '\u2507' // ┇ + BoxDrawingsLightQuadrupleDashHorizontal rune = '\u2508' // ┈ + BoxDrawingsHeavyQuadrupleDashHorizontal rune = '\u2509' // ┉ + BoxDrawingsLightQuadrupleDashVertical rune = '\u250a' // ┊ + BoxDrawingsHeavyQuadrupleDashVertical rune = '\u250b' // ┋ + BoxDrawingsLightDownAndRight rune = '\u250c' // ┌ + BoxDrawingsDownLighAndRightHeavy rune = '\u250d' // ┍ + BoxDrawingsDownHeavyAndRightLight rune = '\u250e' // ┎ + BoxDrawingsHeavyDownAndRight rune = '\u250f' // ┏ + BoxDrawingsLightDownAndLeft rune = '\u2510' // ┐ + BoxDrawingsDownLighAndLeftHeavy rune = '\u2511' // ┑ + BoxDrawingsDownHeavyAndLeftLight rune = '\u2512' // ┒ + BoxDrawingsHeavyDownAndLeft rune = '\u2513' // ┓ + BoxDrawingsLightUpAndRight rune = '\u2514' // └ + BoxDrawingsUpLightAndRightHeavy rune = '\u2515' // ┕ + BoxDrawingsUpHeavyAndRightLight rune = '\u2516' // ┖ + BoxDrawingsHeavyUpAndRight rune = '\u2517' // ┗ + BoxDrawingsLightUpAndLeft rune = '\u2518' // ┘ + BoxDrawingsUpLightAndLeftHeavy rune = '\u2519' // ┙ + BoxDrawingsUpHeavyAndLeftLight rune = '\u251a' // ┚ + BoxDrawingsHeavyUpAndLeft rune = '\u251b' // ┛ + BoxDrawingsLightVerticalAndRight rune = '\u251c' // ├ + BoxDrawingsVerticalLightAndRightHeavy rune = '\u251d' // ┝ + BoxDrawingsUpHeavyAndRightDownLight rune = '\u251e' // ┞ + BoxDrawingsDownHeacyAndRightUpLight rune = '\u251f' // ┟ + 
BoxDrawingsVerticalHeavyAndRightLight rune = '\u2520' // ┠ + BoxDrawingsDownLightAnbdRightUpHeavy rune = '\u2521' // ┡ + BoxDrawingsUpLightAndRightDownHeavy rune = '\u2522' // ┢ + BoxDrawingsHeavyVerticalAndRight rune = '\u2523' // ┣ + BoxDrawingsLightVerticalAndLeft rune = '\u2524' // ┤ + BoxDrawingsVerticalLightAndLeftHeavy rune = '\u2525' // ┥ + BoxDrawingsUpHeavyAndLeftDownLight rune = '\u2526' // ┦ + BoxDrawingsDownHeavyAndLeftUpLight rune = '\u2527' // ┧ + BoxDrawingsVerticalheavyAndLeftLight rune = '\u2528' // ┨ + BoxDrawingsDownLightAndLeftUpHeavy rune = '\u2529' // ┨ + BoxDrawingsUpLightAndLeftDownHeavy rune = '\u252a' // ┪ + BoxDrawingsHeavyVerticalAndLeft rune = '\u252b' // ┫ + BoxDrawingsLightDownAndHorizontal rune = '\u252c' // ┬ + BoxDrawingsLeftHeavyAndRightDownLight rune = '\u252d' // ┭ + BoxDrawingsRightHeavyAndLeftDownLight rune = '\u252e' // ┮ + BoxDrawingsDownLightAndHorizontalHeavy rune = '\u252f' // ┯ + BoxDrawingsDownHeavyAndHorizontalLight rune = '\u2530' // ┰ + BoxDrawingsRightLightAndLeftDownHeavy rune = '\u2531' // ┱ + BoxDrawingsLeftLightAndRightDownHeavy rune = '\u2532' // ┲ + BoxDrawingsHeavyDownAndHorizontal rune = '\u2533' // ┳ + BoxDrawingsLightUpAndHorizontal rune = '\u2534' // ┴ + BoxDrawingsLeftHeavyAndRightUpLight rune = '\u2535' // ┵ + BoxDrawingsRightHeavyAndLeftUpLight rune = '\u2536' // ┶ + BoxDrawingsUpLightAndHorizontalHeavy rune = '\u2537' // ┷ + BoxDrawingsUpHeavyAndHorizontalLight rune = '\u2538' // ┸ + BoxDrawingsRightLightAndLeftUpHeavy rune = '\u2539' // ┹ + BoxDrawingsLeftLightAndRightUpHeavy rune = '\u253a' // ┺ + BoxDrawingsHeavyUpAndHorizontal rune = '\u253b' // ┻ + BoxDrawingsLightVerticalAndHorizontal rune = '\u253c' // ┼ + BoxDrawingsLeftHeavyAndRightVerticalLight rune = '\u253d' // ┽ + BoxDrawingsRightHeavyAndLeftVerticalLight rune = '\u253e' // ┾ + BoxDrawingsVerticalLightAndHorizontalHeavy rune = '\u253f' // ┿ + BoxDrawingsUpHeavyAndDownHorizontalLight rune = '\u2540' // ╀ + 
BoxDrawingsDownHeavyAndUpHorizontalLight rune = '\u2541' // ╁ + BoxDrawingsVerticalHeavyAndHorizontalLight rune = '\u2542' // ╂ + BoxDrawingsLeftUpHeavyAndRightDownLight rune = '\u2543' // ╃ + BoxDrawingsRightUpHeavyAndLeftDownLight rune = '\u2544' // ╄ + BoxDrawingsLeftDownHeavyAndRightUpLight rune = '\u2545' // ╅ + BoxDrawingsRightDownHeavyAndLeftUpLight rune = '\u2546' // ╆ + BoxDrawingsDownLightAndUpHorizontalHeavy rune = '\u2547' // ╇ + BoxDrawingsUpLightAndDownHorizontalHeavy rune = '\u2548' // ╈ + BoxDrawingsRightLightAndLeftVerticalHeavy rune = '\u2549' // ╉ + BoxDrawingsLeftLightAndRightVerticalHeavy rune = '\u254a' // ╊ + BoxDrawingsHeavyVerticalAndHorizontal rune = '\u254b' // ╋ + BoxDrawingsLightDoubleDashHorizontal rune = '\u254c' // ╌ + BoxDrawingsHeavyDoubleDashHorizontal rune = '\u254d' // ╍ + BoxDrawingsLightDoubleDashVertical rune = '\u254e' // ╎ + BoxDrawingsHeavyDoubleDashVertical rune = '\u254f' // ╏ + BoxDrawingsDoubleHorizontal rune = '\u2550' // ═ + BoxDrawingsDoubleVertical rune = '\u2551' // ║ + BoxDrawingsDownSingleAndRightDouble rune = '\u2552' // ╒ + BoxDrawingsDownDoubleAndRightSingle rune = '\u2553' // ╓ + BoxDrawingsDoubleDownAndRight rune = '\u2554' // ╔ + BoxDrawingsDownSingleAndLeftDouble rune = '\u2555' // ╕ + BoxDrawingsDownDoubleAndLeftSingle rune = '\u2556' // ╖ + BoxDrawingsDoubleDownAndLeft rune = '\u2557' // ╗ + BoxDrawingsUpSingleAndRightDouble rune = '\u2558' // ╘ + BoxDrawingsUpDoubleAndRightSingle rune = '\u2559' // ╙ + BoxDrawingsDoubleUpAndRight rune = '\u255a' // ╚ + BoxDrawingsUpSingleAndLeftDouble rune = '\u255b' // ╛ + BoxDrawingsUpDobuleAndLeftSingle rune = '\u255c' // ╜ + BoxDrawingsDoubleUpAndLeft rune = '\u255d' // ╝ + BoxDrawingsVerticalSingleAndRightDouble rune = '\u255e' // ╞ + BoxDrawingsVerticalDoubleAndRightSingle rune = '\u255f' // ╟ + BoxDrawingsDoubleVerticalAndRight rune = '\u2560' // ╠ + BoxDrawingsVerticalSingleAndLeftDouble rune = '\u2561' // ╡ + BoxDrawingsVerticalDoubleAndLeftSingle rune = 
'\u2562' // ╢ + BoxDrawingsDoubleVerticalAndLeft rune = '\u2563' // ╣ + BoxDrawingsDownSingleAndHorizontalDouble rune = '\u2564' // ╤ + BoxDrawingsDownDoubleAndHorizontalSingle rune = '\u2565' // ╥ + BoxDrawingsDoubleDownAndHorizontal rune = '\u2566' // ╦ + BoxDrawingsUpSingleAndHorizontalDouble rune = '\u2567' // ╧ + BoxDrawingsUpDoubleAndHorizontalSingle rune = '\u2568' // ╨ + BoxDrawingsDoubleUpAndHorizontal rune = '\u2569' // ╩ + BoxDrawingsVerticalSingleAndHorizontalDouble rune = '\u256a' // ╪ + BoxDrawingsVerticalDoubleAndHorizontalSingle rune = '\u256b' // ╫ + BoxDrawingsDoubleVerticalAndHorizontal rune = '\u256c' // ╬ + BoxDrawingsLightArcDownAndRight rune = '\u256d' // ╭ + BoxDrawingsLightArcDownAndLeft rune = '\u256e' // ╮ + BoxDrawingsLightArcUpAndLeft rune = '\u256f' // ╯ + BoxDrawingsLightArcUpAndRight rune = '\u2570' // ╰ + BoxDrawingsLightDiagonalUpperRightToLowerLeft rune = '\u2571' // ╱ + BoxDrawingsLightDiagonalUpperLeftToLowerRight rune = '\u2572' // ╲ + BoxDrawingsLightDiagonalCross rune = '\u2573' // ╳ + BoxDrawingsLightLeft rune = '\u2574' // ╴ + BoxDrawingsLightUp rune = '\u2575' // ╵ + BoxDrawingsLightRight rune = '\u2576' // ╶ + BoxDrawingsLightDown rune = '\u2577' // ╷ + BoxDrawingsHeavyLeft rune = '\u2578' // ╸ + BoxDrawingsHeavyUp rune = '\u2579' // ╹ + BoxDrawingsHeavyRight rune = '\u257a' // ╺ + BoxDrawingsHeavyDown rune = '\u257b' // ╻ + BoxDrawingsLightLeftAndHeavyRight rune = '\u257c' // ╼ + BoxDrawingsLightUpAndHeavyDown rune = '\u257d' // ╽ + BoxDrawingsHeavyLeftAndLightRight rune = '\u257e' // ╾ + BoxDrawingsHeavyUpAndLightDown rune = '\u257f' // ╿ +) + +// SemigraphicJoints is a map for joining semigraphic (or otherwise) runes. +// So far only light lines are supported but if you want to change the border +// styling you need to provide the joints, too. +// The matching will be sorted ascending by rune value, so you don't need to +// provide all rune combinations, +// e.g. 
(─) + (│) = (┼) will also match (│) + (─) = (┼) +var SemigraphicJoints = map[string]rune{ + // (─) + (│) = (┼) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightVertical}): BoxDrawingsLightVerticalAndHorizontal, + // (─) + (┌) = (┬) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightDownAndRight}): BoxDrawingsLightDownAndHorizontal, + // (─) + (┐) = (┬) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightDownAndLeft}): BoxDrawingsLightDownAndHorizontal, + // (─) + (└) = (┴) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightUpAndRight}): BoxDrawingsLightUpAndHorizontal, + // (─) + (┘) = (┴) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightUpAndLeft}): BoxDrawingsLightUpAndHorizontal, + // (─) + (├) = (┼) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndHorizontal, + // (─) + (┤) = (┼) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndHorizontal, + // (─) + (┬) = (┬) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightDownAndHorizontal, + // (─) + (┴) = (┴) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightUpAndHorizontal, + // (─) + (┼) = (┼) + string([]rune{BoxDrawingsLightHorizontal, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (│) + (┌) = (├) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightDownAndRight}): BoxDrawingsLightVerticalAndRight, + // (│) + (┐) = (┤) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightDownAndLeft}): BoxDrawingsLightVerticalAndLeft, + // (│) + (└) = (├) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightUpAndRight}): BoxDrawingsLightVerticalAndRight, + // (│) + (┘) = (┤) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightUpAndLeft}): BoxDrawingsLightVerticalAndLeft, + // (│) + (├) = (├) + 
string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndRight, + // (│) + (┤) = (┤) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndLeft, + // (│) + (┬) = (┼) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (│) + (┴) = (┼) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (│) + (┼) = (┼) + string([]rune{BoxDrawingsLightVertical, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┌) + (┐) = (┬) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightDownAndLeft}): BoxDrawingsLightDownAndHorizontal, + // (┌) + (└) = (├) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightUpAndRight}): BoxDrawingsLightVerticalAndRight, + // (┌) + (┘) = (┼) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightUpAndLeft}): BoxDrawingsLightVerticalAndHorizontal, + // (┌) + (├) = (├) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndRight, + // (┌) + (┤) = (┼) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndHorizontal, + // (┌) + (┬) = (┬) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightDownAndHorizontal, + // (┌) + (┴) = (┼) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┌) + (┴) = (┼) + string([]rune{BoxDrawingsLightDownAndRight, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┐) + (└) = (┼) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightUpAndRight}): BoxDrawingsLightVerticalAndHorizontal, + // (┐) + (┘) = (┤) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightUpAndLeft}): 
BoxDrawingsLightVerticalAndLeft, + // (┐) + (├) = (┼) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndHorizontal, + // (┐) + (┤) = (┤) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndLeft, + // (┐) + (┬) = (┬) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightDownAndHorizontal, + // (┐) + (┴) = (┼) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┐) + (┼) = (┼) + string([]rune{BoxDrawingsLightDownAndLeft, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (└) + (┘) = (┴) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightUpAndLeft}): BoxDrawingsLightUpAndHorizontal, + // (└) + (├) = (├) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndRight, + // (└) + (┤) = (┼) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndHorizontal, + // (└) + (┬) = (┼) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (└) + (┴) = (┴) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightUpAndHorizontal, + // (└) + (┼) = (┼) + string([]rune{BoxDrawingsLightUpAndRight, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┘) + (├) = (┼) + string([]rune{BoxDrawingsLightUpAndLeft, BoxDrawingsLightVerticalAndRight}): BoxDrawingsLightVerticalAndHorizontal, + // (┘) + (┤) = (┤) + string([]rune{BoxDrawingsLightUpAndLeft, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndLeft, + // (┘) + (┬) = (┼) + string([]rune{BoxDrawingsLightUpAndLeft, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┘) + (┴) = (┴) + 
string([]rune{BoxDrawingsLightUpAndLeft, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightUpAndHorizontal, + // (┘) + (┼) = (┼) + string([]rune{BoxDrawingsLightUpAndLeft, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (├) + (┤) = (┼) + string([]rune{BoxDrawingsLightVerticalAndRight, BoxDrawingsLightVerticalAndLeft}): BoxDrawingsLightVerticalAndHorizontal, + // (├) + (┬) = (┼) + string([]rune{BoxDrawingsLightVerticalAndRight, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (├) + (┴) = (┼) + string([]rune{BoxDrawingsLightVerticalAndRight, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (├) + (┼) = (┼) + string([]rune{BoxDrawingsLightVerticalAndRight, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┤) + (┬) = (┼) + string([]rune{BoxDrawingsLightVerticalAndLeft, BoxDrawingsLightDownAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┤) + (┴) = (┼) + string([]rune{BoxDrawingsLightVerticalAndLeft, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┤) + (┼) = (┼) + string([]rune{BoxDrawingsLightVerticalAndLeft, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┬) + (┴) = (┼) + string([]rune{BoxDrawingsLightDownAndHorizontal, BoxDrawingsLightUpAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + // (┬) + (┼) = (┼) + string([]rune{BoxDrawingsLightDownAndHorizontal, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, + + // (┴) + (┼) = (┼) + string([]rune{BoxDrawingsLightUpAndHorizontal, BoxDrawingsLightVerticalAndHorizontal}): BoxDrawingsLightVerticalAndHorizontal, +} + +// PrintJoinedSemigraphics prints a semigraphics rune into the screen at the given +// position with the given color, joining it with any existing semigraphics +// rune. Background colors are preserved. 
At this point, only regular single +// line borders are supported. +func PrintJoinedSemigraphics(screen tcell.Screen, x, y int, ch rune, color tcell.Color) { + previous, _, style, _ := screen.GetContent(x, y) + style = style.Foreground(color) + + // What's the resulting rune? + var result rune + if ch == previous { + result = ch + } else { + if ch < previous { + previous, ch = ch, previous + } + result = SemigraphicJoints[string([]rune{previous, ch})] + } + if result == 0 { + result = ch + } + + // We only print something if we have something. + screen.SetContent(x, y, result, nil, style) +} diff --git a/vendor/github.com/rivo/tview/styles.go b/vendor/github.com/rivo/tview/styles.go new file mode 100644 index 00000000000..dbd60ad068e --- /dev/null +++ b/vendor/github.com/rivo/tview/styles.go @@ -0,0 +1,34 @@ +package tview + +import "github.com/gdamore/tcell" + +// Styles defines various colors used when primitives are initialized. These +// may be changed to accommodate a different look and feel. +// +// The default is for applications with a black background and basic colors: +// black, white, yellow, green, and blue. +var Styles = struct { + PrimitiveBackgroundColor tcell.Color // Main background color for primitives. + ContrastBackgroundColor tcell.Color // Background color for contrasting elements. + MoreContrastBackgroundColor tcell.Color // Background color for even more contrasting elements. + BorderColor tcell.Color // Box borders. + TitleColor tcell.Color // Box titles. + GraphicsColor tcell.Color // Graphics. + PrimaryTextColor tcell.Color // Primary text. + SecondaryTextColor tcell.Color // Secondary text (e.g. labels). + TertiaryTextColor tcell.Color // Tertiary text (e.g. subtitles, notes). + InverseTextColor tcell.Color // Text on primary-colored backgrounds. + ContrastSecondaryTextColor tcell.Color // Secondary text on ContrastBackgroundColor-colored backgrounds. 
+}{ + PrimitiveBackgroundColor: tcell.ColorBlack, + ContrastBackgroundColor: tcell.ColorBlue, + MoreContrastBackgroundColor: tcell.ColorGreen, + BorderColor: tcell.ColorWhite, + TitleColor: tcell.ColorWhite, + GraphicsColor: tcell.ColorWhite, + PrimaryTextColor: tcell.ColorWhite, + SecondaryTextColor: tcell.ColorYellow, + TertiaryTextColor: tcell.ColorGreen, + InverseTextColor: tcell.ColorBlue, + ContrastSecondaryTextColor: tcell.ColorDarkCyan, +} diff --git a/vendor/github.com/rivo/tview/table.go b/vendor/github.com/rivo/tview/table.go new file mode 100644 index 00000000000..265aa9f217f --- /dev/null +++ b/vendor/github.com/rivo/tview/table.go @@ -0,0 +1,1143 @@ +package tview + +import ( + "sort" + + "github.com/gdamore/tcell" + colorful "github.com/lucasb-eyer/go-colorful" +) + +// TableCell represents one cell inside a Table. You can instantiate this type +// directly but all colors (background and text) will be set to their default +// which is black. +type TableCell struct { + // The reference object. + Reference interface{} + + // The text to be displayed in the table cell. + Text string + + // The alignment of the cell text. One of AlignLeft (default), AlignCenter, + // or AlignRight. + Align int + + // The maximum width of the cell in screen space. This is used to give a + // column a maximum width. Any cell text whose screen width exceeds this width + // is cut off. Set to 0 if there is no maximum width. + MaxWidth int + + // If the total table width is less than the available width, this value is + // used to add extra width to a column. See SetExpansion() for details. + Expansion int + + // The color of the cell text. + Color tcell.Color + + // The background color of the cell. + BackgroundColor tcell.Color + + // The style attributes of the cell. + Attributes tcell.AttrMask + + // If set to true, this cell cannot be selected. + NotSelectable bool + + // The position and width of the cell the last time table was drawn. 
+ x, y, width int +} + +// NewTableCell returns a new table cell with sensible defaults. That is, left +// aligned text with the primary text color (see Styles) and a transparent +// background (using the background of the Table). +func NewTableCell(text string) *TableCell { + return &TableCell{ + Text: text, + Align: AlignLeft, + Color: Styles.PrimaryTextColor, + BackgroundColor: tcell.ColorDefault, + } +} + +// SetText sets the cell's text. +func (c *TableCell) SetText(text string) *TableCell { + c.Text = text + return c +} + +// SetAlign sets the cell's text alignment, one of AlignLeft, AlignCenter, or +// AlignRight. +func (c *TableCell) SetAlign(align int) *TableCell { + c.Align = align + return c +} + +// SetMaxWidth sets maximum width of the cell in screen space. This is used to +// give a column a maximum width. Any cell text whose screen width exceeds this +// width is cut off. Set to 0 if there is no maximum width. +func (c *TableCell) SetMaxWidth(maxWidth int) *TableCell { + c.MaxWidth = maxWidth + return c +} + +// SetExpansion sets the value by which the column of this cell expands if the +// available width for the table is more than the table width (prior to applying +// this expansion value). This is a proportional value. The amount of unused +// horizontal space is divided into widths to be added to each column. How much +// extra width a column receives depends on the expansion value: A value of 0 +// (the default) will not cause the column to increase in width. Other values +// are proportional, e.g. a value of 2 will cause a column to grow by twice +// the amount of a column with a value of 1. +// +// Since this value affects an entire column, the maximum over all visible cells +// in that column is used. +// +// This function panics if a negative value is provided. 
+func (c *TableCell) SetExpansion(expansion int) *TableCell { + if expansion < 0 { + panic("Table cell expansion values may not be negative") + } + c.Expansion = expansion + return c +} + +// SetTextColor sets the cell's text color. +func (c *TableCell) SetTextColor(color tcell.Color) *TableCell { + c.Color = color + return c +} + +// SetBackgroundColor sets the cell's background color. Set to +// tcell.ColorDefault to use the table's background color. +func (c *TableCell) SetBackgroundColor(color tcell.Color) *TableCell { + c.BackgroundColor = color + return c +} + +// SetAttributes sets the cell's text attributes. You can combine different +// attributes using bitmask operations: +// +// cell.SetAttributes(tcell.AttrUnderline | tcell.AttrBold) +func (c *TableCell) SetAttributes(attr tcell.AttrMask) *TableCell { + c.Attributes = attr + return c +} + +// SetStyle sets the cell's style (foreground color, background color, and +// attributes) all at once. +func (c *TableCell) SetStyle(style tcell.Style) *TableCell { + c.Color, c.BackgroundColor, c.Attributes = style.Decompose() + return c +} + +// SetSelectable sets whether or not this cell can be selected by the user. +func (c *TableCell) SetSelectable(selectable bool) *TableCell { + c.NotSelectable = !selectable + return c +} + +// SetReference allows you to store a reference of any type in this cell. This +// will allow you to establish a mapping between the cell and your +// actual data. +func (c *TableCell) SetReference(reference interface{}) *TableCell { + c.Reference = reference + return c +} + +// GetReference returns this cell's reference object. +func (c *TableCell) GetReference() interface{} { + return c.Reference +} + +// GetLastPosition returns the position of the table cell the last time it was +// drawn on screen. If the cell is not on screen, the return values are +// undefined. 
+// +// Because the Table class will attempt to keep selected cells on screen, this +// function is most useful in response to a "selected" event (see +// SetSelectedFunc()) or a "selectionChanged" event (see +// SetSelectionChangedFunc()). +func (c *TableCell) GetLastPosition() (x, y, width int) { + return c.x, c.y, c.width +} + +// Table visualizes two-dimensional data consisting of rows and columns. Each +// Table cell is defined via SetCell() by the TableCell type. They can be added +// dynamically to the table and changed any time. +// +// The most compact display of a table is without borders. Each row will then +// occupy one row on screen and columns are separated by the rune defined via +// SetSeparator() (a space character by default). +// +// When borders are turned on (via SetBorders()), each table cell is surrounded +// by lines. Therefore one table row will require two rows on screen. +// +// Columns will use as much horizontal space as they need. You can constrain +// their size with the MaxWidth parameter of the TableCell type. +// +// Fixed Columns +// +// You can define fixed rows and rolumns via SetFixed(). They will always stay +// in their place, even when the table is scrolled. Fixed rows are always the +// top rows. Fixed columns are always the leftmost columns. +// +// Selections +// +// You can call SetSelectable() to set columns and/or rows to "selectable". If +// the flag is set only for columns, entire columns can be selected by the user. +// If it is set only for rows, entire rows can be selected. If both flags are +// set, individual cells can be selected. The "selected" handler set via +// SetSelectedFunc() is invoked when the user presses Enter on a selection. +// +// Navigation +// +// If the table extends beyond the available space, it can be navigated with +// key bindings similar to Vim: +// +// - h, left arrow: Move left by one column. +// - l, right arrow: Move right by one column. +// - j, down arrow: Move down by one row. 
+// - k, up arrow: Move up by one row. +// - g, home: Move to the top. +// - G, end: Move to the bottom. +// - Ctrl-F, page down: Move down by one page. +// - Ctrl-B, page up: Move up by one page. +// +// When there is no selection, this affects the entire table (except for fixed +// rows and columns). When there is a selection, the user moves the selection. +// The class will attempt to keep the selection from moving out of the screen. +// +// Use SetInputCapture() to override or modify keyboard input. +// +// See https://github.com/rivo/tview/wiki/Table for an example. +type Table struct { + *Box + + // Whether or not this table has borders around each cell. + borders bool + + // The color of the borders or the separator. + bordersColor tcell.Color + + // If there are no borders, the column separator. + separator rune + + // The cells of the table. Rows first, then columns. + cells [][]*TableCell + + // The rightmost column in the data set. + lastColumn int + + // The number of fixed rows / columns. + fixedRows, fixedColumns int + + // Whether or not rows or columns can be selected. If both are set to true, + // cells can be selected. + rowsSelectable, columnsSelectable bool + + // The currently selected row and column. + selectedRow, selectedColumn int + + // The number of rows/columns by which the table is scrolled down/to the + // right. + rowOffset, columnOffset int + + // If set to true, the table's last row will always be visible. + trackEnd bool + + // The number of visible rows the last time the table was drawn. + visibleRows int + + // The style of the selected rows. If this value is 0, selected rows are + // simply inverted. + selectedStyle tcell.Style + + // An optional function which gets called when the user presses Enter on a + // selected cell. If entire rows selected, the column value is undefined. + // Likewise for entire columns. + selected func(row, column int) + + // An optional function which gets called when the user changes the selection. 
+ // If entire rows selected, the column value is undefined. + // Likewise for entire columns. + selectionChanged func(row, column int) + + // An optional function which gets called when the user presses Escape, Tab, + // or Backtab. Also when the user presses Enter if nothing is selectable. + done func(key tcell.Key) +} + +// NewTable returns a new table. +func NewTable() *Table { + return &Table{ + Box: NewBox(), + bordersColor: Styles.GraphicsColor, + separator: ' ', + lastColumn: -1, + } +} + +// Clear removes all table data. +func (t *Table) Clear() *Table { + t.cells = nil + t.lastColumn = -1 + return t +} + +// SetBorders sets whether or not each cell in the table is surrounded by a +// border. +func (t *Table) SetBorders(show bool) *Table { + t.borders = show + return t +} + +// SetBordersColor sets the color of the cell borders. +func (t *Table) SetBordersColor(color tcell.Color) *Table { + t.bordersColor = color + return t +} + +// SetSelectedStyle sets a specific style for selected cells. If no such style +// is set, per default, selected cells are inverted (i.e. their foreground and +// background colors are swapped). +// +// To reset a previous setting to its default, make the following call: +// +// table.SetSelectedStyle(tcell.ColorDefault, tcell.ColorDefault, 0) +func (t *Table) SetSelectedStyle(foregroundColor, backgroundColor tcell.Color, attributes tcell.AttrMask) *Table { + t.selectedStyle = tcell.StyleDefault.Foreground(foregroundColor).Background(backgroundColor) | tcell.Style(attributes) + return t +} + +// SetSeparator sets the character used to fill the space between two +// neighboring cells. This is a space character ' ' per default but you may +// want to set it to Borders.Vertical (or any other rune) if the column +// separation should be more visible. If cell borders are activated, this is +// ignored. +// +// Separators have the same color as borders. 
+func (t *Table) SetSeparator(separator rune) *Table { + t.separator = separator + return t +} + +// SetFixed sets the number of fixed rows and columns which are always visible +// even when the rest of the cells are scrolled out of view. Rows are always the +// top-most ones. Columns are always the left-most ones. +func (t *Table) SetFixed(rows, columns int) *Table { + t.fixedRows, t.fixedColumns = rows, columns + return t +} + +// SetSelectable sets the flags which determine what can be selected in a table. +// There are three selection modi: +// +// - rows = false, columns = false: Nothing can be selected. +// - rows = true, columns = false: Rows can be selected. +// - rows = false, columns = true: Columns can be selected. +// - rows = true, columns = true: Individual cells can be selected. +func (t *Table) SetSelectable(rows, columns bool) *Table { + t.rowsSelectable, t.columnsSelectable = rows, columns + return t +} + +// GetSelectable returns what can be selected in a table. Refer to +// SetSelectable() for details. +func (t *Table) GetSelectable() (rows, columns bool) { + return t.rowsSelectable, t.columnsSelectable +} + +// GetSelection returns the position of the current selection. +// If entire rows are selected, the column index is undefined. +// Likewise for entire columns. +func (t *Table) GetSelection() (row, column int) { + return t.selectedRow, t.selectedColumn +} + +// Select sets the selected cell. Depending on the selection settings +// specified via SetSelectable(), this may be an entire row or column, or even +// ignored completely. +func (t *Table) Select(row, column int) *Table { + t.selectedRow, t.selectedColumn = row, column + return t +} + +// SetOffset sets how many rows and columns should be skipped when drawing the +// table. This is useful for large tables that do not fit on the screen. +// Navigating a selection can change these values. +// +// Fixed rows and columns are never skipped. 
+func (t *Table) SetOffset(row, column int) *Table { + t.rowOffset, t.columnOffset = row, column + return t +} + +// GetOffset returns the current row and column offset. This indicates how many +// rows and columns the table is scrolled down and to the right. +func (t *Table) GetOffset() (row, column int) { + return t.rowOffset, t.columnOffset +} + +// SetSelectedFunc sets a handler which is called whenever the user presses the +// Enter key on a selected cell/row/column. The handler receives the position of +// the selection and its cell contents. If entire rows are selected, the column +// index is undefined. Likewise for entire columns. +func (t *Table) SetSelectedFunc(handler func(row, column int)) *Table { + t.selected = handler + return t +} + +// SetSelectionChangedFunc sets a handler which is called whenever the user +// navigates to a new selection. The handler receives the position of the new +// selection. If entire rows are selected, the column index is undefined. +// Likewise for entire columns. +func (t *Table) SetSelectionChangedFunc(handler func(row, column int)) *Table { + t.selectionChanged = handler + return t +} + +// SetDoneFunc sets a handler which is called whenever the user presses the +// Escape, Tab, or Backtab key. If nothing is selected, it is also called when +// user presses the Enter key (because pressing Enter on a selection triggers +// the "selected" handler set via SetSelectedFunc()). +func (t *Table) SetDoneFunc(handler func(key tcell.Key)) *Table { + t.done = handler + return t +} + +// SetCell sets the content of a cell the specified position. It is ok to +// directly instantiate a TableCell object. If the cell has content, at least +// the Text and Color fields should be set. +// +// Note that setting cells in previously unknown rows and columns will +// automatically extend the internal table representation, e.g. starting with +// a row of 100,000 will immediately create 100,000 empty rows. 
+// +// To avoid unnecessary garbage collection, fill columns from left to right. +func (t *Table) SetCell(row, column int, cell *TableCell) *Table { + if row >= len(t.cells) { + t.cells = append(t.cells, make([][]*TableCell, row-len(t.cells)+1)...) + } + rowLen := len(t.cells[row]) + if column >= rowLen { + t.cells[row] = append(t.cells[row], make([]*TableCell, column-rowLen+1)...) + for c := rowLen; c < column; c++ { + t.cells[row][c] = &TableCell{} + } + } + t.cells[row][column] = cell + if column > t.lastColumn { + t.lastColumn = column + } + return t +} + +// SetCellSimple calls SetCell() with the given text, left-aligned, in white. +func (t *Table) SetCellSimple(row, column int, text string) *Table { + t.SetCell(row, column, NewTableCell(text)) + return t +} + +// GetCell returns the contents of the cell at the specified position. A valid +// TableCell object is always returned but it will be uninitialized if the cell +// was not previously set. Such an uninitialized object will not automatically +// be inserted. Therefore, repeated calls to this function may return different +// pointers for uninitialized cells. +func (t *Table) GetCell(row, column int) *TableCell { + if row >= len(t.cells) || column >= len(t.cells[row]) { + return &TableCell{} + } + return t.cells[row][column] +} + +// RemoveRow removes the row at the given position from the table. If there is +// no such row, this has no effect. +func (t *Table) RemoveRow(row int) *Table { + if row < 0 || row >= len(t.cells) { + return t + } + + t.cells = append(t.cells[:row], t.cells[row+1:]...) + + return t +} + +// RemoveColumn removes the column at the given position from the table. If +// there is no such column, this has no effect. +func (t *Table) RemoveColumn(column int) *Table { + for row := range t.cells { + if column < 0 || column >= len(t.cells[row]) { + continue + } + t.cells[row] = append(t.cells[row][:column], t.cells[row][column+1:]...) 
+ } + + return t +} + +// InsertRow inserts a row before the row with the given index. Cells on the +// given row and below will be shifted to the bottom by one row. If "row" is +// equal or larger than the current number of rows, this function has no effect. +func (t *Table) InsertRow(row int) *Table { + if row >= len(t.cells) { + return t + } + t.cells = append(t.cells, nil) // Extend by one. + copy(t.cells[row+1:], t.cells[row:]) // Shift down. + t.cells[row] = nil // New row is uninitialized. + return t +} + +// InsertColumn inserts a column before the column with the given index. Cells +// in the given column and to its right will be shifted to the right by one +// column. Rows that have fewer initialized cells than "column" will remain +// unchanged. +func (t *Table) InsertColumn(column int) *Table { + for row := range t.cells { + if column >= len(t.cells[row]) { + continue + } + t.cells[row] = append(t.cells[row], nil) // Extend by one. + copy(t.cells[row][column+1:], t.cells[row][column:]) // Shift to the right. + t.cells[row][column] = &TableCell{} // New element is an uninitialized table cell. + } + return t +} + +// GetRowCount returns the number of rows in the table. +func (t *Table) GetRowCount() int { + return len(t.cells) +} + +// GetColumnCount returns the (maximum) number of columns in the table. +func (t *Table) GetColumnCount() int { + if len(t.cells) == 0 { + return 0 + } + return t.lastColumn + 1 +} + +// ScrollToBeginning scrolls the table to the beginning to that the top left +// corner of the table is shown. Note that this position may be corrected if +// there is a selection. +func (t *Table) ScrollToBeginning() *Table { + t.trackEnd = false + t.columnOffset = 0 + t.rowOffset = 0 + return t +} + +// ScrollToEnd scrolls the table to the beginning to that the bottom left corner +// of the table is shown. Adding more rows to the table will cause it to +// automatically scroll with the new data. 
Note that this position may be +// corrected if there is a selection. +func (t *Table) ScrollToEnd() *Table { + t.trackEnd = true + t.columnOffset = 0 + t.rowOffset = len(t.cells) + return t +} + +// Draw draws this primitive onto the screen. +func (t *Table) Draw(screen tcell.Screen) { + t.Box.Draw(screen) + + // What's our available screen space? + x, y, width, height := t.GetInnerRect() + if t.borders { + t.visibleRows = height / 2 + } else { + t.visibleRows = height + } + + // Return the cell at the specified position (nil if it doesn't exist). + getCell := func(row, column int) *TableCell { + if row < 0 || column < 0 || row >= len(t.cells) || column >= len(t.cells[row]) { + return nil + } + return t.cells[row][column] + } + + // If this cell is not selectable, find the next one. + if t.rowsSelectable || t.columnsSelectable { + if t.selectedColumn < 0 { + t.selectedColumn = 0 + } + if t.selectedRow < 0 { + t.selectedRow = 0 + } + for t.selectedRow < len(t.cells) { + cell := getCell(t.selectedRow, t.selectedColumn) + if cell == nil || !cell.NotSelectable { + break + } + t.selectedColumn++ + if t.selectedColumn > t.lastColumn { + t.selectedColumn = 0 + t.selectedRow++ + } + } + } + + // Clamp row offsets. 
+ if t.rowsSelectable { + if t.selectedRow >= t.fixedRows && t.selectedRow < t.fixedRows+t.rowOffset { + t.rowOffset = t.selectedRow - t.fixedRows + t.trackEnd = false + } + if t.borders { + if 2*(t.selectedRow+1-t.rowOffset) >= height { + t.rowOffset = t.selectedRow + 1 - height/2 + t.trackEnd = false + } + } else { + if t.selectedRow+1-t.rowOffset >= height { + t.rowOffset = t.selectedRow + 1 - height + t.trackEnd = false + } + } + } + if t.borders { + if 2*(len(t.cells)-t.rowOffset) < height { + t.trackEnd = true + } + } else { + if len(t.cells)-t.rowOffset < height { + t.trackEnd = true + } + } + if t.trackEnd { + if t.borders { + t.rowOffset = len(t.cells) - height/2 + } else { + t.rowOffset = len(t.cells) - height + } + } + if t.rowOffset < 0 { + t.rowOffset = 0 + } + + // Clamp column offset. (Only left side here. The right side is more + // difficult and we'll do it below.) + if t.columnsSelectable && t.selectedColumn >= t.fixedColumns && t.selectedColumn < t.fixedColumns+t.columnOffset { + t.columnOffset = t.selectedColumn - t.fixedColumns + } + if t.columnOffset < 0 { + t.columnOffset = 0 + } + if t.selectedColumn < 0 { + t.selectedColumn = 0 + } + + // Determine the indices and widths of the columns and rows which fit on the + // screen. + var ( + columns, rows, widths []int + tableHeight, tableWidth int + ) + rowStep := 1 + if t.borders { + rowStep = 2 // With borders, every table row takes two screen rows. + tableWidth = 1 // We start at the second character because of the left table border. + } + indexRow := func(row int) bool { // Determine if this row is visible, store its index. + if tableHeight >= height { + return false + } + rows = append(rows, row) + tableHeight += rowStep + return true + } + for row := 0; row < t.fixedRows && row < len(t.cells); row++ { // Do the fixed rows first. + if !indexRow(row) { + break + } + } + for row := t.fixedRows + t.rowOffset; row < len(t.cells); row++ { // Then the remaining rows. 
+ if !indexRow(row) { + break + } + } + var ( + skipped, lastTableWidth, expansionTotal int + expansions []int + ) +ColumnLoop: + for column := 0; ; column++ { + // If we've moved beyond the right border, we stop or skip a column. + for tableWidth-1 >= width { // -1 because we include one extra column if the separator falls on the right end of the box. + // We've moved beyond the available space. + if column < t.fixedColumns { + break ColumnLoop // We're in the fixed area. We're done. + } + if !t.columnsSelectable && skipped >= t.columnOffset { + break ColumnLoop // There is no selection and we've already reached the offset. + } + if t.columnsSelectable && t.selectedColumn-skipped == t.fixedColumns { + break ColumnLoop // The selected column reached the leftmost point before disappearing. + } + if t.columnsSelectable && skipped >= t.columnOffset && + (t.selectedColumn < column && lastTableWidth < width-1 && tableWidth < width-1 || t.selectedColumn < column-1) { + break ColumnLoop // We've skipped as many as requested and the selection is visible. + } + if len(columns) <= t.fixedColumns { + break // Nothing to skip. + } + + // We need to skip a column. + skipped++ + lastTableWidth -= widths[t.fixedColumns] + 1 + tableWidth -= widths[t.fixedColumns] + 1 + columns = append(columns[:t.fixedColumns], columns[t.fixedColumns+1:]...) + widths = append(widths[:t.fixedColumns], widths[t.fixedColumns+1:]...) + expansions = append(expansions[:t.fixedColumns], expansions[t.fixedColumns+1:]...) + } + + // What's this column's width (without expansion)? 
+ maxWidth := -1 + expansion := 0 + for _, row := range rows { + if cell := getCell(row, column); cell != nil { + _, _, _, _, cellWidth := decomposeString(cell.Text) + if cell.MaxWidth > 0 && cell.MaxWidth < cellWidth { + cellWidth = cell.MaxWidth + } + if cellWidth > maxWidth { + maxWidth = cellWidth + } + if cell.Expansion > expansion { + expansion = cell.Expansion + } + } + } + if maxWidth < 0 { + break // No more cells found in this column. + } + + // Store new column info at the end. + columns = append(columns, column) + widths = append(widths, maxWidth) + lastTableWidth = tableWidth + tableWidth += maxWidth + 1 + expansions = append(expansions, expansion) + expansionTotal += expansion + } + t.columnOffset = skipped + + // If we have space left, distribute it. + if tableWidth < width { + toDistribute := width - tableWidth + for index, expansion := range expansions { + if expansionTotal <= 0 { + break + } + expWidth := toDistribute * expansion / expansionTotal + widths[index] += expWidth + toDistribute -= expWidth + expansionTotal -= expansion + } + } + + // Helper function which draws border runes. + borderStyle := tcell.StyleDefault.Background(t.backgroundColor).Foreground(t.bordersColor) + drawBorder := func(colX, rowY int, ch rune) { + screen.SetContent(x+colX, y+rowY, ch, nil, borderStyle) + } + + // Draw the cells (and borders). + var columnX int + if !t.borders { + columnX-- + } + for columnIndex, column := range columns { + columnWidth := widths[columnIndex] + for rowY, row := range rows { + if t.borders { + // Draw borders. + rowY *= 2 + for pos := 0; pos < columnWidth && columnX+1+pos < width; pos++ { + drawBorder(columnX+pos+1, rowY, Borders.Horizontal) + } + ch := Borders.Cross + if columnIndex == 0 { + if rowY == 0 { + ch = Borders.TopLeft + } else { + ch = Borders.LeftT + } + } else if rowY == 0 { + ch = Borders.TopT + } + drawBorder(columnX, rowY, ch) + rowY++ + if rowY >= height { + break // No space for the text anymore. 
+ } + drawBorder(columnX, rowY, Borders.Vertical) + } else if columnIndex > 0 { + // Draw separator. + drawBorder(columnX, rowY, t.separator) + } + + // Get the cell. + cell := getCell(row, column) + if cell == nil { + continue + } + + // Draw text. + finalWidth := columnWidth + if columnX+1+columnWidth >= width { + finalWidth = width - columnX - 1 + } + cell.x, cell.y, cell.width = x+columnX+1, y+rowY, finalWidth + _, printed := printWithStyle(screen, cell.Text, x+columnX+1, y+rowY, finalWidth, cell.Align, tcell.StyleDefault.Foreground(cell.Color)|tcell.Style(cell.Attributes)) + if StringWidth(cell.Text)-printed > 0 && printed > 0 { + _, _, style, _ := screen.GetContent(x+columnX+1+finalWidth-1, y+rowY) + printWithStyle(screen, string(SemigraphicsHorizontalEllipsis), x+columnX+1+finalWidth-1, y+rowY, 1, AlignLeft, style) + } + } + + // Draw bottom border. + if rowY := 2 * len(rows); t.borders && rowY < height { + for pos := 0; pos < columnWidth && columnX+1+pos < width; pos++ { + drawBorder(columnX+pos+1, rowY, Borders.Horizontal) + } + ch := Borders.BottomT + if columnIndex == 0 { + ch = Borders.BottomLeft + } + drawBorder(columnX, rowY, ch) + } + + columnX += columnWidth + 1 + } + + // Draw right border. + if t.borders && len(t.cells) > 0 && columnX < width { + for rowY := range rows { + rowY *= 2 + if rowY+1 < height { + drawBorder(columnX, rowY+1, Borders.Vertical) + } + ch := Borders.RightT + if rowY == 0 { + ch = Borders.TopRight + } + drawBorder(columnX, rowY, ch) + } + if rowY := 2 * len(rows); rowY < height { + drawBorder(columnX, rowY, Borders.BottomRight) + } + } + + // Helper function which colors the background of a box. + // backgroundColor == tcell.ColorDefault => Don't color the background. + // textColor == tcell.ColorDefault => Don't change the text color. + // attr == 0 => Don't change attributes. + // invert == true => Ignore attr, set text to backgroundColor or t.backgroundColor; + // set background to textColor. 
+ colorBackground := func(fromX, fromY, w, h int, backgroundColor, textColor tcell.Color, attr tcell.AttrMask, invert bool) { + for by := 0; by < h && fromY+by < y+height; by++ { + for bx := 0; bx < w && fromX+bx < x+width; bx++ { + m, c, style, _ := screen.GetContent(fromX+bx, fromY+by) + fg, bg, a := style.Decompose() + if invert { + if fg == textColor || fg == t.bordersColor { + fg = backgroundColor + } + if fg == tcell.ColorDefault { + fg = t.backgroundColor + } + style = style.Background(textColor).Foreground(fg) + } else { + if backgroundColor != tcell.ColorDefault { + bg = backgroundColor + } + if textColor != tcell.ColorDefault { + fg = textColor + } + if attr != 0 { + a = attr + } + style = style.Background(bg).Foreground(fg) | tcell.Style(a) + } + screen.SetContent(fromX+bx, fromY+by, m, c, style) + } + } + } + + // Color the cell backgrounds. To avoid undesirable artefacts, we combine + // the drawing of a cell by background color, selected cells last. + type cellInfo struct { + x, y, w, h int + text tcell.Color + selected bool + } + cellsByBackgroundColor := make(map[tcell.Color][]*cellInfo) + var backgroundColors []tcell.Color + for rowY, row := range rows { + columnX := 0 + rowSelected := t.rowsSelectable && !t.columnsSelectable && row == t.selectedRow + for columnIndex, column := range columns { + columnWidth := widths[columnIndex] + cell := getCell(row, column) + if cell == nil { + continue + } + bx, by, bw, bh := x+columnX, y+rowY, columnWidth+1, 1 + if t.borders { + by = y + rowY*2 + bw++ + bh = 3 + } + columnSelected := t.columnsSelectable && !t.rowsSelectable && column == t.selectedColumn + cellSelected := !cell.NotSelectable && (columnSelected || rowSelected || t.rowsSelectable && t.columnsSelectable && column == t.selectedColumn && row == t.selectedRow) + entries, ok := cellsByBackgroundColor[cell.BackgroundColor] + cellsByBackgroundColor[cell.BackgroundColor] = append(entries, &cellInfo{ + x: bx, + y: by, + w: bw, + h: bh, + text: cell.Color, 
+ selected: cellSelected, + }) + if !ok { + backgroundColors = append(backgroundColors, cell.BackgroundColor) + } + columnX += columnWidth + 1 + } + } + sort.Slice(backgroundColors, func(i int, j int) bool { + // Draw brightest colors last (i.e. on top). + r, g, b := backgroundColors[i].RGB() + c := colorful.Color{R: float64(r) / 255, G: float64(g) / 255, B: float64(b) / 255} + _, _, li := c.Hcl() + r, g, b = backgroundColors[j].RGB() + c = colorful.Color{R: float64(r) / 255, G: float64(g) / 255, B: float64(b) / 255} + _, _, lj := c.Hcl() + return li < lj + }) + selFg, selBg, selAttr := t.selectedStyle.Decompose() + for _, bgColor := range backgroundColors { + entries := cellsByBackgroundColor[bgColor] + for _, cell := range entries { + if cell.selected { + if t.selectedStyle != 0 { + defer colorBackground(cell.x, cell.y, cell.w, cell.h, selBg, selFg, selAttr, false) + } else { + defer colorBackground(cell.x, cell.y, cell.w, cell.h, bgColor, cell.text, 0, true) + } + } else { + colorBackground(cell.x, cell.y, cell.w, cell.h, bgColor, tcell.ColorDefault, 0, false) + } + } + } +} + +// InputHandler returns the handler for this primitive. +func (t *Table) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return t.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + key := event.Key() + + if (!t.rowsSelectable && !t.columnsSelectable && key == tcell.KeyEnter) || + key == tcell.KeyEscape || + key == tcell.KeyTab || + key == tcell.KeyBacktab { + if t.done != nil { + t.done(key) + } + return + } + + // Movement functions. 
+ previouslySelectedRow, previouslySelectedColumn := t.selectedRow, t.selectedColumn + var ( + getCell = func(row, column int) *TableCell { + if row < 0 || column < 0 || row >= len(t.cells) || column >= len(t.cells[row]) { + return nil + } + return t.cells[row][column] + } + + previous = func() { + for t.selectedRow >= 0 { + cell := getCell(t.selectedRow, t.selectedColumn) + if cell == nil || !cell.NotSelectable { + return + } + t.selectedColumn-- + if t.selectedColumn < 0 { + t.selectedColumn = t.lastColumn + t.selectedRow-- + } + } + } + + next = func() { + if t.selectedColumn > t.lastColumn { + t.selectedColumn = 0 + t.selectedRow++ + if t.selectedRow >= len(t.cells) { + t.selectedRow = len(t.cells) - 1 + } + } + for t.selectedRow < len(t.cells) { + cell := getCell(t.selectedRow, t.selectedColumn) + if cell == nil || !cell.NotSelectable { + return + } + t.selectedColumn++ + if t.selectedColumn > t.lastColumn { + t.selectedColumn = 0 + t.selectedRow++ + } + } + t.selectedColumn = t.lastColumn + t.selectedRow = len(t.cells) - 1 + previous() + } + + home = func() { + if t.rowsSelectable { + t.selectedRow = 0 + t.selectedColumn = 0 + next() + } else { + t.trackEnd = false + t.rowOffset = 0 + t.columnOffset = 0 + } + } + + end = func() { + if t.rowsSelectable { + t.selectedRow = len(t.cells) - 1 + t.selectedColumn = t.lastColumn + previous() + } else { + t.trackEnd = true + t.columnOffset = 0 + } + } + + down = func() { + if t.rowsSelectable { + t.selectedRow++ + if t.selectedRow >= len(t.cells) { + t.selectedRow = len(t.cells) - 1 + } + next() + } else { + t.rowOffset++ + } + } + + up = func() { + if t.rowsSelectable { + t.selectedRow-- + if t.selectedRow < 0 { + t.selectedRow = 0 + } + previous() + } else { + t.trackEnd = false + t.rowOffset-- + } + } + + left = func() { + if t.columnsSelectable { + t.selectedColumn-- + if t.selectedColumn < 0 { + t.selectedColumn = 0 + } + previous() + } else { + t.columnOffset-- + } + } + + right = func() { + if 
t.columnsSelectable { + t.selectedColumn++ + if t.selectedColumn > t.lastColumn { + t.selectedColumn = t.lastColumn + } + next() + } else { + t.columnOffset++ + } + } + + pageDown = func() { + if t.rowsSelectable { + t.selectedRow += t.visibleRows + if t.selectedRow >= len(t.cells) { + t.selectedRow = len(t.cells) - 1 + } + next() + } else { + t.rowOffset += t.visibleRows + } + } + + pageUp = func() { + if t.rowsSelectable { + t.selectedRow -= t.visibleRows + if t.selectedRow < 0 { + t.selectedRow = 0 + } + previous() + } else { + t.trackEnd = false + t.rowOffset -= t.visibleRows + } + } + ) + + switch key { + case tcell.KeyRune: + switch event.Rune() { + case 'g': + home() + case 'G': + end() + case 'j': + down() + case 'k': + up() + case 'h': + left() + case 'l': + right() + } + case tcell.KeyHome: + home() + case tcell.KeyEnd: + end() + case tcell.KeyUp: + up() + case tcell.KeyDown: + down() + case tcell.KeyLeft: + left() + case tcell.KeyRight: + right() + case tcell.KeyPgDn, tcell.KeyCtrlF: + pageDown() + case tcell.KeyPgUp, tcell.KeyCtrlB: + pageUp() + case tcell.KeyEnter: + if (t.rowsSelectable || t.columnsSelectable) && t.selected != nil { + t.selected(t.selectedRow, t.selectedColumn) + } + } + + // If the selection has changed, notify the handler. 
+ if t.selectionChanged != nil && + (t.rowsSelectable && previouslySelectedRow != t.selectedRow || + t.columnsSelectable && previouslySelectedColumn != t.selectedColumn) { + t.selectionChanged(t.selectedRow, t.selectedColumn) + } + }) +} diff --git a/vendor/github.com/rivo/tview/textview.go b/vendor/github.com/rivo/tview/textview.go new file mode 100644 index 00000000000..00ac88cd4a7 --- /dev/null +++ b/vendor/github.com/rivo/tview/textview.go @@ -0,0 +1,1018 @@ +package tview + +import ( + "bytes" + "fmt" + "regexp" + "sync" + "unicode/utf8" + + "github.com/gdamore/tcell" + colorful "github.com/lucasb-eyer/go-colorful" + runewidth "github.com/mattn/go-runewidth" +) + +// TabSize is the number of spaces with which a tab character will be replaced. +var TabSize = 4 + +// textViewIndex contains information about each line displayed in the text +// view. +type textViewIndex struct { + Line int // The index into the "buffer" variable. + Pos int // The index into the "buffer" string (byte position). + NextPos int // The (byte) index of the next character in this buffer line. + Width int // The screen width of this line. + ForegroundColor string // The starting foreground color ("" = don't change, "-" = reset). + BackgroundColor string // The starting background color ("" = don't change, "-" = reset). + Attributes string // The starting attributes ("" = don't change, "-" = reset). + Region string // The starting region ID. +} + +// TextView is a box which displays text. It implements the io.Writer interface +// so you can stream text to it. This does not trigger a redraw automatically +// but if a handler is installed via SetChangedFunc(), you can cause it to be +// redrawn. (See SetChangedFunc() for more details.) +// +// Navigation +// +// If the text view is scrollable (the default), text is kept in a buffer which +// may be larger than the screen and can be navigated similarly to Vim: +// +// - h, left arrow: Move left. +// - l, right arrow: Move right. 
+// - j, down arrow: Move down. +// - k, up arrow: Move up. +// - g, home: Move to the top. +// - G, end: Move to the bottom. +// - Ctrl-F, page down: Move down by one page. +// - Ctrl-B, page up: Move up by one page. +// +// If the text is not scrollable, any text above the top visible line is +// discarded. +// +// Use SetInputCapture() to override or modify keyboard input. +// +// Colors +// +// If dynamic colors are enabled via SetDynamicColors(), text color can be +// changed dynamically by embedding color strings in square brackets. This works +// the same way as anywhere else. Please see the package documentation for more +// information. +// +// Regions and Highlights +// +// If regions are enabled via SetRegions(), you can define text regions within +// the text and assign region IDs to them. Text regions start with region tags. +// Region tags are square brackets that contain a region ID in double quotes, +// for example: +// +// We define a ["rg"]region[""] here. +// +// A text region ends with the next region tag. Tags with no region ID ([""]) +// don't start new regions. They can therefore be used to mark the end of a +// region. Region IDs must satisfy the following regular expression: +// +// [a-zA-Z0-9_,;: \-\.]+ +// +// Regions can be highlighted by calling the Highlight() function with one or +// more region IDs. This can be used to display search results, for example. +// +// The ScrollToHighlight() function can be used to jump to the currently +// highlighted region once when the text view is drawn the next time. +// +// See https://github.com/rivo/tview/wiki/TextView for an example. +type TextView struct { + sync.Mutex + *Box + + // The text buffer. + buffer []string + + // The last bytes that have been received but are not part of the buffer yet. + recentBytes []byte + + // The processed line index. This is nil if the buffer has changed and needs + // to be re-indexed. 
+ index []*textViewIndex + + // The text alignment, one of AlignLeft, AlignCenter, or AlignRight. + align int + + // Indices into the "index" slice which correspond to the first line of the + // first highlight and the last line of the last highlight. This is calculated + // during re-indexing. Set to -1 if there is no current highlight. + fromHighlight, toHighlight int + + // The screen space column of the highlight in its first line. Set to -1 if + // there is no current highlight. + posHighlight int + + // A set of region IDs that are currently highlighted. + highlights map[string]struct{} + + // The last width for which the current table is drawn. + lastWidth int + + // The screen width of the longest line in the index (not the buffer). + longestLine int + + // The index of the first line shown in the text view. + lineOffset int + + // If set to true, the text view will always remain at the end of the content. + trackEnd bool + + // The number of characters to be skipped on each line (not in wrap mode). + columnOffset int + + // The height of the content the last time the text view was drawn. + pageSize int + + // If set to true, the text view will keep a buffer of text which can be + // navigated when the text is longer than what fits into the box. + scrollable bool + + // If set to true, lines that are longer than the available width are wrapped + // onto the next line. If set to false, any characters beyond the available + // width are discarded. + wrap bool + + // If set to true and if wrap is also true, lines are split at spaces or + // after punctuation characters. + wordWrap bool + + // The (starting) color of the text. + textColor tcell.Color + + // If set to true, the text color can be changed dynamically by piping color + // strings in square brackets to the text view. + dynamicColors bool + + // If set to true, region tags can be used to define regions. 
+ regions bool + + // A temporary flag which, when true, will automatically bring the current + // highlight(s) into the visible screen. + scrollToHighlights bool + + // An optional function which is called when the content of the text view has + // changed. + changed func() + + // An optional function which is called when the user presses one of the + // following keys: Escape, Enter, Tab, Backtab. + done func(tcell.Key) +} + +// NewTextView returns a new text view. +func NewTextView() *TextView { + return &TextView{ + Box: NewBox(), + highlights: make(map[string]struct{}), + lineOffset: -1, + scrollable: true, + align: AlignLeft, + wrap: true, + textColor: Styles.PrimaryTextColor, + regions: false, + dynamicColors: false, + } +} + +// SetScrollable sets the flag that decides whether or not the text view is +// scrollable. If true, text is kept in a buffer and can be navigated. +func (t *TextView) SetScrollable(scrollable bool) *TextView { + t.scrollable = scrollable + if !scrollable { + t.trackEnd = true + } + return t +} + +// SetWrap sets the flag that, if true, leads to lines that are longer than the +// available width being wrapped onto the next line. If false, any characters +// beyond the available width are not displayed. +func (t *TextView) SetWrap(wrap bool) *TextView { + if t.wrap != wrap { + t.index = nil + } + t.wrap = wrap + return t +} + +// SetWordWrap sets the flag that, if true and if the "wrap" flag is also true +// (see SetWrap()), wraps the line at spaces or after punctuation marks. Note +// that trailing spaces will not be printed. +// +// This flag is ignored if the "wrap" flag is false. +func (t *TextView) SetWordWrap(wrapOnWords bool) *TextView { + if t.wordWrap != wrapOnWords { + t.index = nil + } + t.wordWrap = wrapOnWords + return t +} + +// SetTextAlign sets the text alignment within the text view. This must be +// either AlignLeft, AlignCenter, or AlignRight. 
+func (t *TextView) SetTextAlign(align int) *TextView { + if t.align != align { + t.index = nil + } + t.align = align + return t +} + +// SetTextColor sets the initial color of the text (which can be changed +// dynamically by sending color strings in square brackets to the text view if +// dynamic colors are enabled). +func (t *TextView) SetTextColor(color tcell.Color) *TextView { + t.textColor = color + return t +} + +// SetText sets the text of this text view to the provided string. Previously +// contained text will be removed. +func (t *TextView) SetText(text string) *TextView { + t.Clear() + fmt.Fprint(t, text) + return t +} + +// SetDynamicColors sets the flag that allows the text color to be changed +// dynamically. See class description for details. +func (t *TextView) SetDynamicColors(dynamic bool) *TextView { + if t.dynamicColors != dynamic { + t.index = nil + } + t.dynamicColors = dynamic + return t +} + +// SetRegions sets the flag that allows to define regions in the text. See class +// description for details. +func (t *TextView) SetRegions(regions bool) *TextView { + if t.regions != regions { + t.index = nil + } + t.regions = regions + return t +} + +// SetChangedFunc sets a handler function which is called when the text of the +// text view has changed. This is useful when text is written to this io.Writer +// in a separate goroutine. This does not automatically cause the screen to be +// refreshed so you may want to use the "changed" handler to redraw the screen. +// +// Note that to avoid race conditions or deadlocks, there are a few rules you +// should follow: +// +// - You can call Application.Draw() from this handler. +// - You can call TextView.HasFocus() from this handler. +// - During the execution of this handler, access to any other variables from +// this primitive or any other primitive should be queued using +// Application.QueueUpdate(). +// +// See package description for details on dealing with concurrency. 
+func (t *TextView) SetChangedFunc(handler func()) *TextView { + t.changed = handler + return t +} + +// SetDoneFunc sets a handler which is called when the user presses on the +// following keys: Escape, Enter, Tab, Backtab. The key is passed to the +// handler. +func (t *TextView) SetDoneFunc(handler func(key tcell.Key)) *TextView { + t.done = handler + return t +} + +// ScrollTo scrolls to the specified row and column (both starting with 0). +func (t *TextView) ScrollTo(row, column int) *TextView { + if !t.scrollable { + return t + } + t.lineOffset = row + t.columnOffset = column + return t +} + +// ScrollToBeginning scrolls to the top left corner of the text if the text view +// is scrollable. +func (t *TextView) ScrollToBeginning() *TextView { + if !t.scrollable { + return t + } + t.trackEnd = false + t.lineOffset = 0 + t.columnOffset = 0 + return t +} + +// ScrollToEnd scrolls to the bottom left corner of the text if the text view +// is scrollable. Adding new rows to the end of the text view will cause it to +// scroll with the new data. +func (t *TextView) ScrollToEnd() *TextView { + if !t.scrollable { + return t + } + t.trackEnd = true + t.columnOffset = 0 + return t +} + +// GetScrollOffset returns the number of rows and columns that are skipped at +// the top left corner when the text view has been scrolled. +func (t *TextView) GetScrollOffset() (row, column int) { + return t.lineOffset, t.columnOffset +} + +// Clear removes all text from the buffer. +func (t *TextView) Clear() *TextView { + t.buffer = nil + t.recentBytes = nil + t.index = nil + return t +} + +// Highlight specifies which regions should be highlighted. See class +// description for details on regions. Empty region strings are ignored. +// +// Text in highlighted regions will be drawn inverted, i.e. with their +// background and foreground colors swapped. +// +// Calling this function will remove any previous highlights. 
To remove all +// highlights, call this function without any arguments. +func (t *TextView) Highlight(regionIDs ...string) *TextView { + t.highlights = make(map[string]struct{}) + for _, id := range regionIDs { + if id == "" { + continue + } + t.highlights[id] = struct{}{} + } + t.index = nil + return t +} + +// GetHighlights returns the IDs of all currently highlighted regions. +func (t *TextView) GetHighlights() (regionIDs []string) { + for id := range t.highlights { + regionIDs = append(regionIDs, id) + } + return +} + +// ScrollToHighlight will cause the visible area to be scrolled so that the +// highlighted regions appear in the visible area of the text view. This +// repositioning happens the next time the text view is drawn. It happens only +// once so you will need to call this function repeatedly to always keep +// highlighted regions in view. +// +// Nothing happens if there are no highlighted regions or if the text view is +// not scrollable. +func (t *TextView) ScrollToHighlight() *TextView { + if len(t.highlights) == 0 || !t.scrollable || !t.regions { + return t + } + t.index = nil + t.scrollToHighlights = true + t.trackEnd = false + return t +} + +// GetRegionText returns the text of the region with the given ID. If dynamic +// colors are enabled, color tags are stripped from the text. Newlines are +// always returned as '\n' runes. +// +// If the region does not exist or if regions are turned off, an empty string +// is returned. +func (t *TextView) GetRegionText(regionID string) string { + if !t.regions || regionID == "" { + return "" + } + + var ( + buffer bytes.Buffer + currentRegionID string + ) + + for _, str := range t.buffer { + // Find all color tags in this line. + var colorTagIndices [][]int + if t.dynamicColors { + colorTagIndices = colorPattern.FindAllStringIndex(str, -1) + } + + // Find all regions in this line. 
+ var ( + regionIndices [][]int + regions [][]string + ) + if t.regions { + regionIndices = regionPattern.FindAllStringIndex(str, -1) + regions = regionPattern.FindAllStringSubmatch(str, -1) + } + + // Analyze this line. + var currentTag, currentRegion int + for pos, ch := range str { + // Skip any color tags. + if currentTag < len(colorTagIndices) && pos >= colorTagIndices[currentTag][0] && pos < colorTagIndices[currentTag][1] { + if pos == colorTagIndices[currentTag][1]-1 { + currentTag++ + } + continue + } + + // Skip any regions. + if currentRegion < len(regionIndices) && pos >= regionIndices[currentRegion][0] && pos < regionIndices[currentRegion][1] { + if pos == regionIndices[currentRegion][1]-1 { + if currentRegionID == regionID { + // This is the end of the requested region. We're done. + return buffer.String() + } + currentRegionID = regions[currentRegion][1] + currentRegion++ + } + continue + } + + // Add this rune. + if currentRegionID == regionID { + buffer.WriteRune(ch) + } + } + + // Add newline. + if currentRegionID == regionID { + buffer.WriteRune('\n') + } + } + + return escapePattern.ReplaceAllString(buffer.String(), `[$1$2]`) +} + +// Focus is called when this primitive receives focus. +func (t *TextView) Focus(delegate func(p Primitive)) { + // Implemented here with locking because this is used by layout primitives. + t.Lock() + defer t.Unlock() + t.hasFocus = true +} + +// HasFocus returns whether or not this primitive has focus. +func (t *TextView) HasFocus() bool { + // Implemented here with locking because this may be used in the "changed" + // callback. + t.Lock() + defer t.Unlock() + return t.hasFocus +} + +// Write lets us implement the io.Writer interface. Tab characters will be +// replaced with TabSize space characters. A "\n" or "\r\n" will be interpreted +// as a new line. +func (t *TextView) Write(p []byte) (n int, err error) { + // Notify at the end. 
+ t.Lock() + changed := t.changed + t.Unlock() + if changed != nil { + defer changed() // Deadlocks may occur if we lock here. + } + + t.Lock() + defer t.Unlock() + + // Copy data over. + newBytes := append(t.recentBytes, p...) + t.recentBytes = nil + + // If we have a trailing invalid UTF-8 byte, we'll wait. + if r, _ := utf8.DecodeLastRune(p); r == utf8.RuneError { + t.recentBytes = newBytes + return len(p), nil + } + + // If we have a trailing open dynamic color, exclude it. + if t.dynamicColors { + openColor := regexp.MustCompile(`\[([a-zA-Z]*|#[0-9a-zA-Z]*)$`) + location := openColor.FindIndex(newBytes) + if location != nil { + t.recentBytes = newBytes[location[0]:] + newBytes = newBytes[:location[0]] + } + } + + // If we have a trailing open region, exclude it. + if t.regions { + openRegion := regexp.MustCompile(`\["[a-zA-Z0-9_,;: \-\.]*"?$`) + location := openRegion.FindIndex(newBytes) + if location != nil { + t.recentBytes = newBytes[location[0]:] + newBytes = newBytes[:location[0]] + } + } + + // Transform the new bytes into strings. + newLine := regexp.MustCompile(`\r?\n`) + newBytes = bytes.Replace(newBytes, []byte{'\t'}, bytes.Repeat([]byte{' '}, TabSize), -1) + for index, line := range newLine.Split(string(newBytes), -1) { + if index == 0 { + if len(t.buffer) == 0 { + t.buffer = []string{line} + } else { + t.buffer[len(t.buffer)-1] += line + } + } else { + t.buffer = append(t.buffer, line) + } + } + + // Reset the index. + t.index = nil + + return len(p), nil +} + +// reindexBuffer re-indexes the buffer such that we can use it to easily draw +// the buffer onto the screen. Each line in the index will contain a pointer +// into the buffer from which on we will print text. It will also contain the +// color with which the line starts. +func (t *TextView) reindexBuffer(width int) { + if t.index != nil { + return // Nothing has changed. We can still use the current index. 
+ } + t.index = nil + t.fromHighlight, t.toHighlight, t.posHighlight = -1, -1, -1 + + // If there's no space, there's no index. + if width < 1 { + return + } + + // Initial states. + regionID := "" + var highlighted bool + + // Go through each line in the buffer. + for bufferIndex, str := range t.buffer { + // Find all color tags in this line. Then remove them. + var ( + colorTagIndices [][]int + colorTags [][]string + escapeIndices [][]int + ) + strippedStr := str + if t.dynamicColors { + colorTagIndices, colorTags, escapeIndices, strippedStr, _ = decomposeString(str) + } + + // Find all regions in this line. Then remove them. + var ( + regionIndices [][]int + regions [][]string + ) + if t.regions { + regionIndices = regionPattern.FindAllStringIndex(str, -1) + regions = regionPattern.FindAllStringSubmatch(str, -1) + strippedStr = regionPattern.ReplaceAllString(strippedStr, "") + } + + // We don't need the original string anymore for now. + str = strippedStr + + // Split the line if required. + var splitLines []string + if t.wrap && len(str) > 0 { + for len(str) > 0 { + extract := runewidth.Truncate(str, width, "") + if t.wordWrap && len(extract) < len(str) { + // Add any spaces from the next line. + if spaces := spacePattern.FindStringIndex(str[len(extract):]); spaces != nil && spaces[0] == 0 { + extract = str[:len(extract)+spaces[1]] + } + + // Can we split before the mandatory end? + matches := boundaryPattern.FindAllStringIndex(extract, -1) + if len(matches) > 0 { + // Yes. Let's split there. + extract = extract[:matches[len(matches)-1][1]] + } + } + splitLines = append(splitLines, extract) + str = str[len(extract):] + } + } else { + // No need to split the line. + splitLines = []string{str} + } + + // Create index from split lines. 
+ var ( + originalPos, colorPos, regionPos, escapePos int + foregroundColor, backgroundColor, attributes string + ) + for _, splitLine := range splitLines { + line := &textViewIndex{ + Line: bufferIndex, + Pos: originalPos, + ForegroundColor: foregroundColor, + BackgroundColor: backgroundColor, + Attributes: attributes, + Region: regionID, + } + + // Shift original position with tags. + lineLength := len(splitLine) + remainingLength := lineLength + tagEnd := originalPos + totalTagLength := 0 + for { + // Which tag comes next? + nextTag := make([][3]int, 0, 3) + if colorPos < len(colorTagIndices) { + nextTag = append(nextTag, [3]int{colorTagIndices[colorPos][0], colorTagIndices[colorPos][1], 0}) // 0 = color tag. + } + if regionPos < len(regionIndices) { + nextTag = append(nextTag, [3]int{regionIndices[regionPos][0], regionIndices[regionPos][1], 1}) // 1 = region tag. + } + if escapePos < len(escapeIndices) { + nextTag = append(nextTag, [3]int{escapeIndices[escapePos][0], escapeIndices[escapePos][1], 2}) // 2 = escape tag. + } + minPos := -1 + tagIndex := -1 + for index, pair := range nextTag { + if minPos < 0 || pair[0] < minPos { + minPos = pair[0] + tagIndex = index + } + } + + // Is the next tag in range? + if tagIndex < 0 || minPos >= tagEnd+remainingLength { + break // No. We're done with this line. + } + + // Advance. + strippedTagStart := nextTag[tagIndex][0] - originalPos - totalTagLength + tagEnd = nextTag[tagIndex][1] + tagLength := tagEnd - nextTag[tagIndex][0] + if nextTag[tagIndex][2] == 2 { + tagLength = 1 + } + totalTagLength += tagLength + remainingLength = lineLength - (tagEnd - originalPos - totalTagLength) + + // Process the tag. + switch nextTag[tagIndex][2] { + case 0: + // Process color tags. + foregroundColor, backgroundColor, attributes = styleFromTag(foregroundColor, backgroundColor, attributes, colorTags[colorPos]) + colorPos++ + case 1: + // Process region tags. 
+ regionID = regions[regionPos][1] + _, highlighted = t.highlights[regionID] + + // Update highlight range. + if highlighted { + line := len(t.index) + if t.fromHighlight < 0 { + t.fromHighlight, t.toHighlight = line, line + t.posHighlight = runewidth.StringWidth(splitLine[:strippedTagStart]) + } else if line > t.toHighlight { + t.toHighlight = line + } + } + + regionPos++ + case 2: + // Process escape tags. + escapePos++ + } + } + + // Advance to next line. + originalPos += lineLength + totalTagLength + + // Append this line. + line.NextPos = originalPos + line.Width = runewidth.StringWidth(splitLine) + t.index = append(t.index, line) + } + + // Word-wrapped lines may have trailing whitespace. Remove it. + if t.wrap && t.wordWrap { + for _, line := range t.index { + str := t.buffer[line.Line][line.Pos:line.NextPos] + spaces := spacePattern.FindAllStringIndex(str, -1) + if spaces != nil && spaces[len(spaces)-1][1] == len(str) { + oldNextPos := line.NextPos + line.NextPos -= spaces[len(spaces)-1][1] - spaces[len(spaces)-1][0] + line.Width -= runewidth.StringWidth(t.buffer[line.Line][line.NextPos:oldNextPos]) + } + } + } + } + + // Calculate longest line. + t.longestLine = 0 + for _, line := range t.index { + if line.Width > t.longestLine { + t.longestLine = line.Width + } + } +} + +// Draw draws this primitive onto the screen. +func (t *TextView) Draw(screen tcell.Screen) { + t.Lock() + defer t.Unlock() + t.Box.Draw(screen) + + // Get the available size. + x, y, width, height := t.GetInnerRect() + t.pageSize = height + + // If the width has changed, we need to reindex. + if width != t.lastWidth && t.wrap { + t.index = nil + } + t.lastWidth = width + + // Re-index. + t.reindexBuffer(width) + + // If we don't have an index, there's nothing to draw. + if t.index == nil { + return + } + + // Move to highlighted regions. + if t.regions && t.scrollToHighlights && t.fromHighlight >= 0 { + // Do we fit the entire height? 
+ if t.toHighlight-t.fromHighlight+1 < height { + // Yes, let's center the highlights. + t.lineOffset = (t.fromHighlight + t.toHighlight - height) / 2 + } else { + // No, let's move to the start of the highlights. + t.lineOffset = t.fromHighlight + } + + // If the highlight is too far to the right, move it to the middle. + if t.posHighlight-t.columnOffset > 3*width/4 { + t.columnOffset = t.posHighlight - width/2 + } + + // If the highlight is off-screen on the left, move it on-screen. + if t.posHighlight-t.columnOffset < 0 { + t.columnOffset = t.posHighlight - width/4 + } + } + t.scrollToHighlights = false + + // Adjust line offset. + if t.lineOffset+height > len(t.index) { + t.trackEnd = true + } + if t.trackEnd { + t.lineOffset = len(t.index) - height + } + if t.lineOffset < 0 { + t.lineOffset = 0 + } + + // Adjust column offset. + if t.align == AlignLeft { + if t.columnOffset+width > t.longestLine { + t.columnOffset = t.longestLine - width + } + if t.columnOffset < 0 { + t.columnOffset = 0 + } + } else if t.align == AlignRight { + if t.columnOffset-width < -t.longestLine { + t.columnOffset = width - t.longestLine + } + if t.columnOffset > 0 { + t.columnOffset = 0 + } + } else { // AlignCenter. + half := (t.longestLine - width) / 2 + if half > 0 { + if t.columnOffset > half { + t.columnOffset = half + } + if t.columnOffset < -half { + t.columnOffset = -half + } + } else { + t.columnOffset = 0 + } + } + + // Draw the buffer. + defaultStyle := tcell.StyleDefault.Foreground(t.textColor) + for line := t.lineOffset; line < len(t.index); line++ { + // Are we done? + if line-t.lineOffset >= height { + break + } + + // Get the text for this line. + index := t.index[line] + text := t.buffer[index.Line][index.Pos:index.NextPos] + foregroundColor := index.ForegroundColor + backgroundColor := index.BackgroundColor + attributes := index.Attributes + regionID := index.Region + + // Get color tags. 
+ var ( + colorTagIndices [][]int + colorTags [][]string + escapeIndices [][]int + ) + strippedText := text + if t.dynamicColors { + colorTagIndices, colorTags, escapeIndices, strippedText, _ = decomposeString(text) + } + + // Get regions. + var ( + regionIndices [][]int + regions [][]string + ) + if t.regions { + regionIndices = regionPattern.FindAllStringIndex(text, -1) + regions = regionPattern.FindAllStringSubmatch(text, -1) + strippedText = regionPattern.ReplaceAllString(strippedText, "") + if !t.dynamicColors { + escapeIndices = escapePattern.FindAllStringIndex(text, -1) + strippedText = string(escapePattern.ReplaceAllString(strippedText, "[$1$2]")) + } + } + + // Calculate the position of the line. + var skip, posX int + if t.align == AlignLeft { + posX = -t.columnOffset + } else if t.align == AlignRight { + posX = width - index.Width - t.columnOffset + } else { // AlignCenter. + posX = (width-index.Width)/2 - t.columnOffset + } + if posX < 0 { + skip = -posX + posX = 0 + } + + // Print the line. + var colorPos, regionPos, escapePos, tagOffset, skipped int + iterateString(strippedText, func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + // Process tags. + for { + if colorPos < len(colorTags) && textPos+tagOffset >= colorTagIndices[colorPos][0] && textPos+tagOffset < colorTagIndices[colorPos][1] { + // Get the color. + foregroundColor, backgroundColor, attributes = styleFromTag(foregroundColor, backgroundColor, attributes, colorTags[colorPos]) + tagOffset += colorTagIndices[colorPos][1] - colorTagIndices[colorPos][0] + colorPos++ + } else if regionPos < len(regionIndices) && textPos+tagOffset >= regionIndices[regionPos][0] && textPos+tagOffset < regionIndices[regionPos][1] { + // Get the region. + regionID = regions[regionPos][1] + tagOffset += regionIndices[regionPos][1] - regionIndices[regionPos][0] + regionPos++ + } else { + break + } + } + + // Skip the second-to-last character of an escape tag. 
+ if escapePos < len(escapeIndices) && textPos+tagOffset == escapeIndices[escapePos][1]-2 { + tagOffset++ + escapePos++ + } + + // Mix the existing style with the new style. + _, _, existingStyle, _ := screen.GetContent(x+posX, y+line-t.lineOffset) + _, background, _ := existingStyle.Decompose() + style := overlayStyle(background, defaultStyle, foregroundColor, backgroundColor, attributes) + + // Do we highlight this character? + var highlighted bool + if len(regionID) > 0 { + if _, ok := t.highlights[regionID]; ok { + highlighted = true + } + } + if highlighted { + fg, bg, _ := style.Decompose() + if bg == tcell.ColorDefault { + r, g, b := fg.RGB() + c := colorful.Color{R: float64(r) / 255, G: float64(g) / 255, B: float64(b) / 255} + _, _, li := c.Hcl() + if li < .5 { + bg = tcell.ColorWhite + } else { + bg = tcell.ColorBlack + } + } + style = style.Background(fg).Foreground(bg) + } + + // Skip to the right. + if !t.wrap && skipped < skip { + skipped += screenWidth + return false + } + + // Stop at the right border. + if posX+screenWidth > width { + return true + } + + // Draw the character. + for offset := screenWidth - 1; offset >= 0; offset-- { + if offset == 0 { + screen.SetContent(x+posX+offset, y+line-t.lineOffset, main, comb, style) + } else { + screen.SetContent(x+posX+offset, y+line-t.lineOffset, ' ', nil, style) + } + } + + // Advance. + posX += screenWidth + return false + }) + } + + // If this view is not scrollable, we'll purge the buffer of lines that have + // scrolled out of view. + if !t.scrollable && t.lineOffset > 0 { + t.buffer = t.buffer[t.index[t.lineOffset].Line:] + t.index = nil + } +} + +// InputHandler returns the handler for this primitive. 
+func (t *TextView) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return t.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + key := event.Key() + + if key == tcell.KeyEscape || key == tcell.KeyEnter || key == tcell.KeyTab || key == tcell.KeyBacktab { + if t.done != nil { + t.done(key) + } + return + } + + if !t.scrollable { + return + } + + switch key { + case tcell.KeyRune: + switch event.Rune() { + case 'g': // Home. + t.trackEnd = false + t.lineOffset = 0 + t.columnOffset = 0 + case 'G': // End. + t.trackEnd = true + t.columnOffset = 0 + case 'j': // Down. + t.lineOffset++ + case 'k': // Up. + t.trackEnd = false + t.lineOffset-- + case 'h': // Left. + t.columnOffset-- + case 'l': // Right. + t.columnOffset++ + } + case tcell.KeyHome: + t.trackEnd = false + t.lineOffset = 0 + t.columnOffset = 0 + case tcell.KeyEnd: + t.trackEnd = true + t.columnOffset = 0 + case tcell.KeyUp: + t.trackEnd = false + t.lineOffset-- + case tcell.KeyDown: + t.lineOffset++ + case tcell.KeyLeft: + t.columnOffset-- + case tcell.KeyRight: + t.columnOffset++ + case tcell.KeyPgDn, tcell.KeyCtrlF: + t.lineOffset += t.pageSize + case tcell.KeyPgUp, tcell.KeyCtrlB: + t.trackEnd = false + t.lineOffset -= t.pageSize + } + }) +} diff --git a/vendor/github.com/rivo/tview/treeview.go b/vendor/github.com/rivo/tview/treeview.go new file mode 100644 index 00000000000..79d26d51835 --- /dev/null +++ b/vendor/github.com/rivo/tview/treeview.go @@ -0,0 +1,689 @@ +package tview + +import ( + "github.com/gdamore/tcell" +) + +// Tree navigation events. +const ( + treeNone int = iota + treeHome + treeEnd + treeUp + treeDown + treePageUp + treePageDown +) + +// TreeNode represents one node in a tree view. +type TreeNode struct { + // The reference object. + reference interface{} + + // This node's child nodes. + children []*TreeNode + + // The item's text. + text string + + // The text color. 
+ color tcell.Color + + // Whether or not this node can be selected. + selectable bool + + // Whether or not this node's children should be displayed. + expanded bool + + // The additional horizontal indent of this node's text. + indent int + + // An optional function which is called when the user selects this node. + selected func() + + // Temporary member variables. + parent *TreeNode // The parent node (nil for the root). + level int // The hierarchy level (0 for the root, 1 for its children, and so on). + graphicsX int // The x-coordinate of the left-most graphics rune. + textX int // The x-coordinate of the first rune of the text. +} + +// NewTreeNode returns a new tree node. +func NewTreeNode(text string) *TreeNode { + return &TreeNode{ + text: text, + color: Styles.PrimaryTextColor, + indent: 2, + expanded: true, + selectable: true, + } +} + +// Walk traverses this node's subtree in depth-first, pre-order (NLR) order and +// calls the provided callback function on each traversed node (which includes +// this node) with the traversed node and its parent node (nil for this node). +// The callback returns whether traversal should continue with the traversed +// node's child nodes (true) or not recurse any deeper (false). +func (n *TreeNode) Walk(callback func(node, parent *TreeNode) bool) *TreeNode { + n.parent = nil + nodes := []*TreeNode{n} + for len(nodes) > 0 { + // Pop the top node and process it. + node := nodes[len(nodes)-1] + nodes = nodes[:len(nodes)-1] + if !callback(node, node.parent) { + // Don't add any children. + continue + } + + // Add children in reverse order. + for index := len(node.children) - 1; index >= 0; index-- { + node.children[index].parent = node + nodes = append(nodes, node.children[index]) + } + } + + return n +} + +// SetReference allows you to store a reference of any type in this node. This +// will allow you to establish a mapping between the TreeView hierarchy and your +// internal tree structure. 
+func (n *TreeNode) SetReference(reference interface{}) *TreeNode { + n.reference = reference + return n +} + +// GetReference returns this node's reference object. +func (n *TreeNode) GetReference() interface{} { + return n.reference +} + +// SetChildren sets this node's child nodes. +func (n *TreeNode) SetChildren(childNodes []*TreeNode) *TreeNode { + n.children = childNodes + return n +} + +// GetText returns this node's text. +func (n *TreeNode) GetText() string { + return n.text +} + +// GetChildren returns this node's children. +func (n *TreeNode) GetChildren() []*TreeNode { + return n.children +} + +// ClearChildren removes all child nodes from this node. +func (n *TreeNode) ClearChildren() *TreeNode { + n.children = nil + return n +} + +// AddChild adds a new child node to this node. +func (n *TreeNode) AddChild(node *TreeNode) *TreeNode { + n.children = append(n.children, node) + return n +} + +// SetSelectable sets a flag indicating whether this node can be selected by +// the user. +func (n *TreeNode) SetSelectable(selectable bool) *TreeNode { + n.selectable = selectable + return n +} + +// SetSelectedFunc sets a function which is called when the user selects this +// node by hitting Enter when it is selected. +func (n *TreeNode) SetSelectedFunc(handler func()) *TreeNode { + n.selected = handler + return n +} + +// SetExpanded sets whether or not this node's child nodes should be displayed. +func (n *TreeNode) SetExpanded(expanded bool) *TreeNode { + n.expanded = expanded + return n +} + +// Expand makes the child nodes of this node appear. +func (n *TreeNode) Expand() *TreeNode { + n.expanded = true + return n +} + +// Collapse makes the child nodes of this node disappear. +func (n *TreeNode) Collapse() *TreeNode { + n.expanded = false + return n +} + +// ExpandAll expands this node and all descendent nodes. 
+func (n *TreeNode) ExpandAll() *TreeNode { + n.Walk(func(node, parent *TreeNode) bool { + node.expanded = true + return true + }) + return n +} + +// CollapseAll collapses this node and all descendent nodes. +func (n *TreeNode) CollapseAll() *TreeNode { + n.Walk(func(node, parent *TreeNode) bool { + n.expanded = false + return true + }) + return n +} + +// IsExpanded returns whether the child nodes of this node are visible. +func (n *TreeNode) IsExpanded() bool { + return n.expanded +} + +// SetText sets the node's text which is displayed. +func (n *TreeNode) SetText(text string) *TreeNode { + n.text = text + return n +} + +// SetColor sets the node's text color. +func (n *TreeNode) SetColor(color tcell.Color) *TreeNode { + n.color = color + return n +} + +// SetIndent sets an additional indentation for this node's text. A value of 0 +// keeps the text as far left as possible with a minimum of line graphics. Any +// value greater than that moves the text to the right. +func (n *TreeNode) SetIndent(indent int) *TreeNode { + n.indent = indent + return n +} + +// TreeView displays tree structures. A tree consists of nodes (TreeNode +// objects) where each node has zero or more child nodes and exactly one parent +// node (except for the root node which has no parent node). +// +// The SetRoot() function is used to specify the root of the tree. Other nodes +// are added locally to the root node or any of its descendents. See the +// TreeNode documentation for details on node attributes. (You can use +// SetReference() to store a reference to nodes of your own tree structure.) +// +// Nodes can be selected by calling SetCurrentNode(). The user can navigate the +// selection or the tree by using the following keys: +// +// - j, down arrow, right arrow: Move (the selection) down by one node. +// - k, up arrow, left arrow: Move (the selection) up by one node. +// - g, home: Move (the selection) to the top. +// - G, end: Move (the selection) to the bottom. 
+// - Ctrl-F, page down: Move (the selection) down by one page. +// - Ctrl-B, page up: Move (the selection) up by one page. +// +// Selected nodes can trigger the "selected" callback when the user hits Enter. +// +// The root node corresponds to level 0, its children correspond to level 1, +// their children to level 2, and so on. Per default, the first level that is +// displayed is 0, i.e. the root node. You can call SetTopLevel() to hide +// levels. +// +// If graphics are turned on (see SetGraphics()), lines indicate the tree's +// hierarchy. Alternative (or additionally), you can set different prefixes +// using SetPrefixes() for different levels, for example to display hierarchical +// bullet point lists. +// +// See https://github.com/rivo/tview/wiki/TreeView for an example. +type TreeView struct { + *Box + + // The root node. + root *TreeNode + + // The currently selected node or nil if no node is selected. + currentNode *TreeNode + + // The movement to be performed during the call to Draw(), one of the + // constants defined above. + movement int + + // The top hierarchical level shown. (0 corresponds to the root level.) + topLevel int + + // Strings drawn before the nodes, based on their level. + prefixes []string + + // Vertical scroll offset. + offsetY int + + // If set to true, all node texts will be aligned horizontally. + align bool + + // If set to true, the tree structure is drawn using lines. + graphics bool + + // The color of the lines. + graphicsColor tcell.Color + + // An optional function which is called when the user has navigated to a new + // tree node. + changed func(node *TreeNode) + + // An optional function which is called when a tree item was selected. + selected func(node *TreeNode) + + // The visible nodes, top-down, as set by process(). + nodes []*TreeNode +} + +// NewTreeView returns a new tree view. 
+func NewTreeView() *TreeView { + return &TreeView{ + Box: NewBox(), + graphics: true, + graphicsColor: Styles.GraphicsColor, + } +} + +// SetRoot sets the root node of the tree. +func (t *TreeView) SetRoot(root *TreeNode) *TreeView { + t.root = root + return t +} + +// GetRoot returns the root node of the tree. If no such node was previously +// set, nil is returned. +func (t *TreeView) GetRoot() *TreeNode { + return t.root +} + +// SetCurrentNode sets the currently selected node. Provide nil to clear all +// selections. Selected nodes must be visible and selectable, or else the +// selection will be changed to the top-most selectable and visible node. +// +// This function does NOT trigger the "changed" callback. +func (t *TreeView) SetCurrentNode(node *TreeNode) *TreeView { + t.currentNode = node + return t +} + +// GetCurrentNode returns the currently selected node or nil of no node is +// currently selected. +func (t *TreeView) GetCurrentNode() *TreeNode { + return t.currentNode +} + +// SetTopLevel sets the first tree level that is visible with 0 referring to the +// root, 1 to the root's child nodes, and so on. Nodes above the top level are +// not displayed. +func (t *TreeView) SetTopLevel(topLevel int) *TreeView { + t.topLevel = topLevel + return t +} + +// SetPrefixes defines the strings drawn before the nodes' texts. This is a +// slice of strings where each element corresponds to a node's hierarchy level, +// i.e. 0 for the root, 1 for the root's children, and so on (levels will +// cycle). +// +// For example, to display a hierarchical list with bullet points: +// +// treeView.SetGraphics(false). +// SetPrefixes([]string{"* ", "- ", "x "}) +func (t *TreeView) SetPrefixes(prefixes []string) *TreeView { + t.prefixes = prefixes + return t +} + +// SetAlign controls the horizontal alignment of the node texts. If set to true, +// all texts except that of top-level nodes will be placed in the same column. 
+// If set to false, they will indent with the hierarchy. +func (t *TreeView) SetAlign(align bool) *TreeView { + t.align = align + return t +} + +// SetGraphics sets a flag which determines whether or not line graphics are +// drawn to illustrate the tree's hierarchy. +func (t *TreeView) SetGraphics(showGraphics bool) *TreeView { + t.graphics = showGraphics + return t +} + +// SetGraphicsColor sets the colors of the lines used to draw the tree structure. +func (t *TreeView) SetGraphicsColor(color tcell.Color) *TreeView { + t.graphicsColor = color + return t +} + +// SetChangedFunc sets the function which is called when the user navigates to +// a new tree node. +func (t *TreeView) SetChangedFunc(handler func(node *TreeNode)) *TreeView { + t.changed = handler + return t +} + +// SetSelectedFunc sets the function which is called when the user selects a +// node by pressing Enter on the current selection. +func (t *TreeView) SetSelectedFunc(handler func(node *TreeNode)) *TreeView { + t.selected = handler + return t +} + +// process builds the visible tree, populates the "nodes" slice, and processes +// pending selection actions. +func (t *TreeView) process() { + _, _, _, height := t.GetInnerRect() + + // Determine visible nodes and their placement. + var graphicsOffset, maxTextX int + t.nodes = nil + selectedIndex := -1 + topLevelGraphicsX := -1 + if t.graphics { + graphicsOffset = 1 + } + t.root.Walk(func(node, parent *TreeNode) bool { + // Set node attributes. + node.parent = parent + if parent == nil { + node.level = 0 + node.graphicsX = 0 + node.textX = 0 + } else { + node.level = parent.level + 1 + node.graphicsX = parent.textX + node.textX = node.graphicsX + graphicsOffset + node.indent + } + if !t.graphics && t.align { + // Without graphics, we align nodes on the first column. + node.textX = 0 + } + if node.level == t.topLevel { + // No graphics for top level nodes. 
+ node.graphicsX = 0 + node.textX = 0 + } + if node.textX > maxTextX { + maxTextX = node.textX + } + if node == t.currentNode && node.selectable { + selectedIndex = len(t.nodes) + } + + // Maybe we want to skip this level. + if t.topLevel == node.level && (topLevelGraphicsX < 0 || node.graphicsX < topLevelGraphicsX) { + topLevelGraphicsX = node.graphicsX + } + + // Add and recurse (if desired). + if node.level >= t.topLevel { + t.nodes = append(t.nodes, node) + } + return node.expanded + }) + + // Post-process positions. + for _, node := range t.nodes { + // If text must align, we correct the positions. + if t.align && node.level > t.topLevel { + node.textX = maxTextX + } + + // If we skipped levels, shift to the left. + if topLevelGraphicsX > 0 { + node.graphicsX -= topLevelGraphicsX + node.textX -= topLevelGraphicsX + } + } + + // Process selection. (Also trigger events if necessary.) + if selectedIndex >= 0 { + // Move the selection. + newSelectedIndex := selectedIndex + MovementSwitch: + switch t.movement { + case treeUp: + for newSelectedIndex > 0 { + newSelectedIndex-- + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + case treeDown: + for newSelectedIndex < len(t.nodes)-1 { + newSelectedIndex++ + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + case treeHome: + for newSelectedIndex = 0; newSelectedIndex < len(t.nodes); newSelectedIndex++ { + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + case treeEnd: + for newSelectedIndex = len(t.nodes) - 1; newSelectedIndex >= 0; newSelectedIndex-- { + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + case treePageUp: + if newSelectedIndex+height < len(t.nodes) { + newSelectedIndex += height + } else { + newSelectedIndex = len(t.nodes) - 1 + } + for ; newSelectedIndex < 
len(t.nodes); newSelectedIndex++ { + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + case treePageDown: + if newSelectedIndex >= height { + newSelectedIndex -= height + } else { + newSelectedIndex = 0 + } + for ; newSelectedIndex >= 0; newSelectedIndex-- { + if t.nodes[newSelectedIndex].selectable { + break MovementSwitch + } + } + newSelectedIndex = selectedIndex + } + t.currentNode = t.nodes[newSelectedIndex] + if newSelectedIndex != selectedIndex { + t.movement = treeNone + if t.changed != nil { + t.changed(t.currentNode) + } + } + selectedIndex = newSelectedIndex + + // Move selection into viewport. + if selectedIndex-t.offsetY >= height { + t.offsetY = selectedIndex - height + 1 + } + if selectedIndex < t.offsetY { + t.offsetY = selectedIndex + } + } else { + // If selection is not visible or selectable, select the first candidate. + if t.currentNode != nil { + for index, node := range t.nodes { + if node.selectable { + selectedIndex = index + t.currentNode = node + break + } + } + } + if selectedIndex < 0 { + t.currentNode = nil + } + } +} + +// Draw draws this primitive onto the screen. +func (t *TreeView) Draw(screen tcell.Screen) { + t.Box.Draw(screen) + if t.root == nil { + return + } + + // Build the tree if necessary. + if t.nodes == nil { + t.process() + } + defer func() { + t.nodes = nil // Rebuild during next call to Draw() + }() + + // Scroll the tree. + x, y, width, height := t.GetInnerRect() + switch t.movement { + case treeUp: + t.offsetY-- + case treeDown: + t.offsetY++ + case treeHome: + t.offsetY = 0 + case treeEnd: + t.offsetY = len(t.nodes) + case treePageUp: + t.offsetY -= height + case treePageDown: + t.offsetY += height + } + t.movement = treeNone + + // Fix invalid offsets. + if t.offsetY >= len(t.nodes)-height { + t.offsetY = len(t.nodes) - height + } + if t.offsetY < 0 { + t.offsetY = 0 + } + + // Draw the tree. 
+ posY := y + lineStyle := tcell.StyleDefault.Background(t.backgroundColor).Foreground(t.graphicsColor) + for index, node := range t.nodes { + // Skip invisible parts. + if posY >= y+height+1 { + break + } + if index < t.offsetY { + continue + } + + // Draw the graphics. + if t.graphics { + // Draw ancestor branches. + ancestor := node.parent + for ancestor != nil && ancestor.parent != nil && ancestor.parent.level >= t.topLevel { + if ancestor.graphicsX >= width { + continue + } + + // Draw a branch if this ancestor is not a last child. + if ancestor.parent.children[len(ancestor.parent.children)-1] != ancestor { + if posY-1 >= y && ancestor.textX > ancestor.graphicsX { + PrintJoinedSemigraphics(screen, x+ancestor.graphicsX, posY-1, Borders.Vertical, t.graphicsColor) + } + if posY < y+height { + screen.SetContent(x+ancestor.graphicsX, posY, Borders.Vertical, nil, lineStyle) + } + } + ancestor = ancestor.parent + } + + if node.textX > node.graphicsX && node.graphicsX < width { + // Connect to the node above. + if posY-1 >= y && t.nodes[index-1].graphicsX <= node.graphicsX && t.nodes[index-1].textX > node.graphicsX { + PrintJoinedSemigraphics(screen, x+node.graphicsX, posY-1, Borders.TopLeft, t.graphicsColor) + } + + // Join this node. + if posY < y+height { + screen.SetContent(x+node.graphicsX, posY, Borders.BottomLeft, nil, lineStyle) + for pos := node.graphicsX + 1; pos < node.textX && pos < width; pos++ { + screen.SetContent(x+pos, posY, Borders.Horizontal, nil, lineStyle) + } + } + } + } + + // Draw the prefix and the text. + if node.textX < width && posY < y+height { + // Prefix. + var prefixWidth int + if len(t.prefixes) > 0 { + _, prefixWidth = Print(screen, t.prefixes[(node.level-t.topLevel)%len(t.prefixes)], x+node.textX, posY, width-node.textX, AlignLeft, node.color) + } + + // Text. 
+ if node.textX+prefixWidth < width { + style := tcell.StyleDefault.Foreground(node.color) + if node == t.currentNode { + style = tcell.StyleDefault.Background(node.color).Foreground(t.backgroundColor) + } + printWithStyle(screen, node.text, x+node.textX+prefixWidth, posY, width-node.textX-prefixWidth, AlignLeft, style) + } + } + + // Advance. + posY++ + } +} + +// InputHandler returns the handler for this primitive. +func (t *TreeView) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) { + return t.WrapInputHandler(func(event *tcell.EventKey, setFocus func(p Primitive)) { + // Because the tree is flattened into a list only at drawing time, we also + // postpone the (selection) movement to drawing time. + switch key := event.Key(); key { + case tcell.KeyTab, tcell.KeyDown, tcell.KeyRight: + t.movement = treeDown + case tcell.KeyBacktab, tcell.KeyUp, tcell.KeyLeft: + t.movement = treeUp + case tcell.KeyHome: + t.movement = treeHome + case tcell.KeyEnd: + t.movement = treeEnd + case tcell.KeyPgDn, tcell.KeyCtrlF: + t.movement = treePageDown + case tcell.KeyPgUp, tcell.KeyCtrlB: + t.movement = treePageUp + case tcell.KeyRune: + switch event.Rune() { + case 'g': + t.movement = treeHome + case 'G': + t.movement = treeEnd + case 'j': + t.movement = treeDown + case 'k': + t.movement = treeUp + } + case tcell.KeyEnter: + if t.currentNode != nil { + if t.selected != nil { + t.selected(t.currentNode) + } + if t.currentNode.selected != nil { + t.currentNode.selected() + } + } + } + + t.process() + }) +} diff --git a/vendor/github.com/rivo/tview/util.go b/vendor/github.com/rivo/tview/util.go new file mode 100644 index 00000000000..e12b53ca44c --- /dev/null +++ b/vendor/github.com/rivo/tview/util.go @@ -0,0 +1,608 @@ +package tview + +import ( + "math" + "regexp" + "strconv" + "unicode" + + "github.com/gdamore/tcell" + runewidth "github.com/mattn/go-runewidth" +) + +// Text alignment within a box. 
+const ( + AlignLeft = iota + AlignCenter + AlignRight +) + +// Common regular expressions. +var ( + colorPattern = regexp.MustCompile(`\[([a-zA-Z]+|#[0-9a-zA-Z]{6}|\-)?(:([a-zA-Z]+|#[0-9a-zA-Z]{6}|\-)?(:([lbdru]+|\-)?)?)?\]`) + regionPattern = regexp.MustCompile(`\["([a-zA-Z0-9_,;: \-\.]*)"\]`) + escapePattern = regexp.MustCompile(`\[([a-zA-Z0-9_,;: \-\."#]+)\[(\[*)\]`) + nonEscapePattern = regexp.MustCompile(`(\[[a-zA-Z0-9_,;: \-\."#]+\[*)\]`) + boundaryPattern = regexp.MustCompile(`(([[:punct:]]|\n)[ \t\f\r]*|(\s+))`) + spacePattern = regexp.MustCompile(`\s+`) +) + +// Positions of substrings in regular expressions. +const ( + colorForegroundPos = 1 + colorBackgroundPos = 3 + colorFlagPos = 5 +) + +// Predefined InputField acceptance functions. +var ( + // InputFieldInteger accepts integers. + InputFieldInteger func(text string, ch rune) bool + + // InputFieldFloat accepts floating-point numbers. + InputFieldFloat func(text string, ch rune) bool + + // InputFieldMaxLength returns an input field accept handler which accepts + // input strings up to a given length. Use it like this: + // + // inputField.SetAcceptanceFunc(InputFieldMaxLength(10)) // Accept up to 10 characters. + InputFieldMaxLength func(maxLength int) func(text string, ch rune) bool +) + +// Package initialization. +func init() { + // We'll use zero width joiners. + runewidth.ZeroWidthJoiner = true + + // Initialize the predefined input field handlers. + InputFieldInteger = func(text string, ch rune) bool { + if text == "-" { + return true + } + _, err := strconv.Atoi(text) + return err == nil + } + InputFieldFloat = func(text string, ch rune) bool { + if text == "-" || text == "." || text == "-." 
{ + return true + } + _, err := strconv.ParseFloat(text, 64) + return err == nil + } + InputFieldMaxLength = func(maxLength int) func(text string, ch rune) bool { + return func(text string, ch rune) bool { + return len([]rune(text)) <= maxLength + } + } +} + +// styleFromTag takes the given style, defined by a foreground color (fgColor), +// a background color (bgColor), and style attributes, and modifies it based on +// the substrings (tagSubstrings) extracted by the regular expression for color +// tags. The new colors and attributes are returned where empty strings mean +// "don't modify" and a dash ("-") means "reset to default". +func styleFromTag(fgColor, bgColor, attributes string, tagSubstrings []string) (newFgColor, newBgColor, newAttributes string) { + if tagSubstrings[colorForegroundPos] != "" { + color := tagSubstrings[colorForegroundPos] + if color == "-" { + fgColor = "-" + } else if color != "" { + fgColor = color + } + } + + if tagSubstrings[colorBackgroundPos-1] != "" { + color := tagSubstrings[colorBackgroundPos] + if color == "-" { + bgColor = "-" + } else if color != "" { + bgColor = color + } + } + + if tagSubstrings[colorFlagPos-1] != "" { + flags := tagSubstrings[colorFlagPos] + if flags == "-" { + attributes = "-" + } else if flags != "" { + attributes = flags + } + } + + return fgColor, bgColor, attributes +} + +// overlayStyle mixes a background color with a foreground color (fgColor), +// a (possibly new) background color (bgColor), and style attributes, and +// returns the resulting style. For a definition of the colors and attributes, +// see styleFromTag(). Reset instructions cause the corresponding part of the +// default style to be used. 
+func overlayStyle(background tcell.Color, defaultStyle tcell.Style, fgColor, bgColor, attributes string) tcell.Style { + defFg, defBg, defAttr := defaultStyle.Decompose() + style := defaultStyle.Background(background) + + style = style.Foreground(defFg) + if fgColor != "" { + style = style.Foreground(tcell.GetColor(fgColor)) + } + + if bgColor == "-" || bgColor == "" && defBg != tcell.ColorDefault { + style = style.Background(defBg) + } else if bgColor != "" { + style = style.Background(tcell.GetColor(bgColor)) + } + + if attributes == "-" { + style = style.Bold(defAttr&tcell.AttrBold > 0) + style = style.Blink(defAttr&tcell.AttrBlink > 0) + style = style.Reverse(defAttr&tcell.AttrReverse > 0) + style = style.Underline(defAttr&tcell.AttrUnderline > 0) + style = style.Dim(defAttr&tcell.AttrDim > 0) + } else if attributes != "" { + style = style.Normal() + for _, flag := range attributes { + switch flag { + case 'l': + style = style.Blink(true) + case 'b': + style = style.Bold(true) + case 'd': + style = style.Dim(true) + case 'r': + style = style.Reverse(true) + case 'u': + style = style.Underline(true) + } + } + } + + return style +} + +// decomposeString returns information about a string which may contain color +// tags. It returns the indices of the color tags (as returned by +// re.FindAllStringIndex()), the color tags themselves (as returned by +// re.FindAllStringSubmatch()), the indices of an escaped tags, the string +// stripped by any color tags and escaped, and the screen width of the stripped +// string. +func decomposeString(text string) (colorIndices [][]int, colors [][]string, escapeIndices [][]int, stripped string, width int) { + // Get positions of color and escape tags. + colorIndices = colorPattern.FindAllStringIndex(text, -1) + colors = colorPattern.FindAllStringSubmatch(text, -1) + escapeIndices = escapePattern.FindAllStringIndex(text, -1) + + // Because the color pattern detects empty tags, we need to filter them out. 
+ for i := len(colorIndices) - 1; i >= 0; i-- { + if colorIndices[i][1]-colorIndices[i][0] == 2 { + colorIndices = append(colorIndices[:i], colorIndices[i+1:]...) + colors = append(colors[:i], colors[i+1:]...) + } + } + + // Remove the color tags from the original string. + var from int + buf := make([]byte, 0, len(text)) + for _, indices := range colorIndices { + buf = append(buf, []byte(text[from:indices[0]])...) + from = indices[1] + } + buf = append(buf, text[from:]...) + + // Escape string. + stripped = string(escapePattern.ReplaceAll(buf, []byte("[$1$2]"))) + + // Get the width of the stripped string. + width = runewidth.StringWidth(stripped) + + return +} + +// Print prints text onto the screen into the given box at (x,y,maxWidth,1), +// not exceeding that box. "align" is one of AlignLeft, AlignCenter, or +// AlignRight. The screen's background color will not be changed. +// +// You can change the colors and text styles mid-text by inserting a color tag. +// See the package description for details. +// +// Returns the number of actual bytes of the text printed (including color tags) +// and the actual width used for the printed runes. +func Print(screen tcell.Screen, text string, x, y, maxWidth, align int, color tcell.Color) (int, int) { + return printWithStyle(screen, text, x, y, maxWidth, align, tcell.StyleDefault.Foreground(color)) +} + +// printWithStyle works like Print() but it takes a style instead of just a +// foreground color. +func printWithStyle(screen tcell.Screen, text string, x, y, maxWidth, align int, style tcell.Style) (int, int) { + if maxWidth <= 0 || len(text) == 0 { + return 0, 0 + } + + // Decompose the text. + colorIndices, colors, escapeIndices, strippedText, strippedWidth := decomposeString(text) + + // We want to reduce all alignments to AlignLeft. + if align == AlignRight { + if strippedWidth <= maxWidth { + // There's enough space for the entire text. 
+ return printWithStyle(screen, text, x+maxWidth-strippedWidth, y, maxWidth, AlignLeft, style) + } + // Trim characters off the beginning. + var ( + bytes, width, colorPos, escapePos, tagOffset int + foregroundColor, backgroundColor, attributes string + ) + _, originalBackground, _ := style.Decompose() + iterateString(strippedText, func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + // Update color/escape tag offset and style. + if colorPos < len(colorIndices) && textPos+tagOffset >= colorIndices[colorPos][0] && textPos+tagOffset < colorIndices[colorPos][1] { + foregroundColor, backgroundColor, attributes = styleFromTag(foregroundColor, backgroundColor, attributes, colors[colorPos]) + style = overlayStyle(originalBackground, style, foregroundColor, backgroundColor, attributes) + tagOffset += colorIndices[colorPos][1] - colorIndices[colorPos][0] + colorPos++ + } + if escapePos < len(escapeIndices) && textPos+tagOffset >= escapeIndices[escapePos][0] && textPos+tagOffset < escapeIndices[escapePos][1] { + tagOffset++ + escapePos++ + } + if strippedWidth-screenPos < maxWidth { + // We chopped off enough. + if escapePos > 0 && textPos+tagOffset-1 >= escapeIndices[escapePos-1][0] && textPos+tagOffset-1 < escapeIndices[escapePos-1][1] { + // Unescape open escape sequences. + escapeCharPos := escapeIndices[escapePos-1][1] - 2 + text = text[:escapeCharPos] + text[escapeCharPos+1:] + } + // Print and return. + bytes, width = printWithStyle(screen, text[textPos+tagOffset:], x, y, maxWidth, AlignLeft, style) + return true + } + return false + }) + return bytes, width + } else if align == AlignCenter { + if strippedWidth == maxWidth { + // Use the exact space. + return printWithStyle(screen, text, x, y, maxWidth, AlignLeft, style) + } else if strippedWidth < maxWidth { + // We have more space than we need. 
+ half := (maxWidth - strippedWidth) / 2 + return printWithStyle(screen, text, x+half, y, maxWidth-half, AlignLeft, style) + } else { + // Chop off runes until we have a perfect fit. + var choppedLeft, choppedRight, leftIndex, rightIndex int + rightIndex = len(strippedText) + for rightIndex-1 > leftIndex && strippedWidth-choppedLeft-choppedRight > maxWidth { + if choppedLeft < choppedRight { + // Iterate on the left by one character. + iterateString(strippedText[leftIndex:], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + choppedLeft += screenWidth + leftIndex += textWidth + return true + }) + } else { + // Iterate on the right by one character. + iterateStringReverse(strippedText[leftIndex:rightIndex], func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + choppedRight += screenWidth + rightIndex -= textWidth + return true + }) + } + } + + // Add tag offsets and determine start style. + var ( + colorPos, escapePos, tagOffset int + foregroundColor, backgroundColor, attributes string + ) + _, originalBackground, _ := style.Decompose() + for index := range strippedText { + // We only need the offset of the left index. + if index > leftIndex { + // We're done. + if escapePos > 0 && leftIndex+tagOffset-1 >= escapeIndices[escapePos-1][0] && leftIndex+tagOffset-1 < escapeIndices[escapePos-1][1] { + // Unescape open escape sequences. + escapeCharPos := escapeIndices[escapePos-1][1] - 2 + text = text[:escapeCharPos] + text[escapeCharPos+1:] + } + break + } + + // Update color/escape tag offset. 
+ if colorPos < len(colorIndices) && index+tagOffset >= colorIndices[colorPos][0] && index+tagOffset < colorIndices[colorPos][1] { + if index <= leftIndex { + foregroundColor, backgroundColor, attributes = styleFromTag(foregroundColor, backgroundColor, attributes, colors[colorPos]) + style = overlayStyle(originalBackground, style, foregroundColor, backgroundColor, attributes) + } + tagOffset += colorIndices[colorPos][1] - colorIndices[colorPos][0] + colorPos++ + } + if escapePos < len(escapeIndices) && index+tagOffset >= escapeIndices[escapePos][0] && index+tagOffset < escapeIndices[escapePos][1] { + tagOffset++ + escapePos++ + } + } + return printWithStyle(screen, text[leftIndex+tagOffset:], x, y, maxWidth, AlignLeft, style) + } + } + + // Draw text. + var ( + drawn, drawnWidth, colorPos, escapePos, tagOffset int + foregroundColor, backgroundColor, attributes string + ) + iterateString(strippedText, func(main rune, comb []rune, textPos, length, screenPos, screenWidth int) bool { + // Only continue if there is still space. + if drawnWidth+screenWidth > maxWidth { + return true + } + + // Handle color tags. + if colorPos < len(colorIndices) && textPos+tagOffset >= colorIndices[colorPos][0] && textPos+tagOffset < colorIndices[colorPos][1] { + foregroundColor, backgroundColor, attributes = styleFromTag(foregroundColor, backgroundColor, attributes, colors[colorPos]) + tagOffset += colorIndices[colorPos][1] - colorIndices[colorPos][0] + colorPos++ + } + + // Handle scape tags. + if escapePos < len(escapeIndices) && textPos+tagOffset >= escapeIndices[escapePos][0] && textPos+tagOffset < escapeIndices[escapePos][1] { + if textPos+tagOffset == escapeIndices[escapePos][1]-2 { + tagOffset++ + escapePos++ + } + } + + // Print the rune sequence. 
+ finalX := x + drawnWidth + _, _, finalStyle, _ := screen.GetContent(finalX, y) + _, background, _ := finalStyle.Decompose() + finalStyle = overlayStyle(background, style, foregroundColor, backgroundColor, attributes) + for offset := screenWidth - 1; offset >= 0; offset-- { + // To avoid undesired effects, we populate all cells. + if offset == 0 { + screen.SetContent(finalX+offset, y, main, comb, finalStyle) + } else { + screen.SetContent(finalX+offset, y, ' ', nil, finalStyle) + } + } + + // Advance. + drawn += length + drawnWidth += screenWidth + + return false + }) + + return drawn + tagOffset + len(escapeIndices), drawnWidth +} + +// PrintSimple prints white text to the screen at the given position. +func PrintSimple(screen tcell.Screen, text string, x, y int) { + Print(screen, text, x, y, math.MaxInt32, AlignLeft, Styles.PrimaryTextColor) +} + +// StringWidth returns the width of the given string needed to print it on +// screen. The text may contain color tags which are not counted. +func StringWidth(text string) int { + _, _, _, _, width := decomposeString(text) + return width +} + +// WordWrap splits a text such that each resulting line does not exceed the +// given screen width. Possible split points are after any punctuation or +// whitespace. Whitespace after split points will be dropped. +// +// This function considers color tags to have no width. +// +// Text is always split at newline characters ('\n'). +func WordWrap(text string, width int) (lines []string) { + colorTagIndices, _, escapeIndices, strippedText, _ := decomposeString(text) + + // Find candidate breakpoints. + breakpoints := boundaryPattern.FindAllStringSubmatchIndex(strippedText, -1) + // Results in one entry for each candidate. Each entry is an array a of + // indices into strippedText where a[6] < 0 for newline/punctuation matches + // and a[4] < 0 for whitespace matches. + + // Process stripped text one character at a time. 
+ var ( + colorPos, escapePos, breakpointPos, tagOffset int + lastBreakpoint, lastContinuation, currentLineStart int + lineWidth, continuationWidth int + newlineBreakpoint bool + ) + unescape := func(substr string, startIndex int) string { + // A helper function to unescape escaped tags. + for index := escapePos; index >= 0; index-- { + if index < len(escapeIndices) && startIndex > escapeIndices[index][0] && startIndex < escapeIndices[index][1]-1 { + pos := escapeIndices[index][1] - 2 - startIndex + return substr[:pos] + substr[pos+1:] + } + } + return substr + } + iterateString(strippedText, func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool { + // Handle colour tags. + if colorPos < len(colorTagIndices) && textPos+tagOffset >= colorTagIndices[colorPos][0] && textPos+tagOffset < colorTagIndices[colorPos][1] { + tagOffset += colorTagIndices[colorPos][1] - colorTagIndices[colorPos][0] + colorPos++ + } + + // Handle escape tags. + if escapePos < len(escapeIndices) && textPos+tagOffset == escapeIndices[escapePos][1]-2 { + tagOffset++ + escapePos++ + } + + // Check if a break is warranted. + afterContinuation := lastContinuation > 0 && textPos+tagOffset >= lastContinuation + noBreakpoint := lastContinuation == 0 + beyondWidth := lineWidth > 0 && lineWidth > width + if beyondWidth && noBreakpoint { + // We need a hard break without a breakpoint. + lines = append(lines, unescape(text[currentLineStart:textPos+tagOffset], currentLineStart)) + currentLineStart = textPos + tagOffset + lineWidth = continuationWidth + } else if afterContinuation && (beyondWidth || newlineBreakpoint) { + // Break at last breakpoint or at newline. + lines = append(lines, unescape(text[currentLineStart:lastBreakpoint], currentLineStart)) + currentLineStart = lastContinuation + lineWidth = continuationWidth + lastBreakpoint, lastContinuation, newlineBreakpoint = 0, 0, false + } + + // Is this a breakpoint? 
+ if breakpointPos < len(breakpoints) && textPos == breakpoints[breakpointPos][0] { + // Yes, it is. Set up breakpoint infos depending on its type. + lastBreakpoint = breakpoints[breakpointPos][0] + tagOffset + lastContinuation = breakpoints[breakpointPos][1] + tagOffset + newlineBreakpoint = main == '\n' + if breakpoints[breakpointPos][6] < 0 && !newlineBreakpoint { + lastBreakpoint++ // Don't skip punctuation. + } + breakpointPos++ + } + + // Once we hit the continuation point, we start buffering widths. + if textPos+tagOffset < lastContinuation { + continuationWidth = 0 + } + + lineWidth += screenWidth + continuationWidth += screenWidth + return false + }) + + // Flush the rest. + if currentLineStart < len(text) { + lines = append(lines, unescape(text[currentLineStart:], currentLineStart)) + } + + return +} + +// Escape escapes the given text such that color and/or region tags are not +// recognized and substituted by the print functions of this package. For +// example, to include a tag-like string in a box title or in a TextView: +// +// box.SetTitle(tview.Escape("[squarebrackets]")) +// fmt.Fprint(textView, tview.Escape(`["quoted"]`)) +func Escape(text string) string { + return nonEscapePattern.ReplaceAllString(text, "$1[]") +} + +// iterateString iterates through the given string one printed character at a +// time. For each such character, the callback function is called with the +// Unicode code points of the character (the first rune and any combining runes +// which may be nil if there aren't any), the starting position (in bytes) +// within the original string, its length in bytes, the screen position of the +// character, and the screen width of it. The iteration stops if the callback +// returns true. This function returns true if the iteration was stopped before +// the last character. 
+func iterateString(text string, callback func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool) bool { + var ( + runes []rune + lastZeroWidthJoiner bool + startIndex int + startPos int + pos int + ) + + // Helper function which invokes the callback. + flush := func(index int) bool { + var comb []rune + if len(runes) > 1 { + comb = runes[1:] + } + return callback(runes[0], comb, startIndex, index-startIndex, startPos, pos-startPos) + } + + for index, r := range text { + if unicode.In(r, unicode.M) || r == '\u200d' { + lastZeroWidthJoiner = r == '\u200d' + } else { + // We have a rune that's not a modifier. It could be the beginning of a + // new character. + if !lastZeroWidthJoiner { + if len(runes) > 0 { + // It is. Invoke callback. + if flush(index) { + return true // We're done. + } + // Reset rune store. + runes = runes[:0] + startIndex = index + startPos = pos + } + pos += runewidth.RuneWidth(r) + } else { + lastZeroWidthJoiner = false + } + } + runes = append(runes, r) + } + + // Flush any remaining runes. + if len(runes) > 0 { + flush(len(text)) + } + + return false +} + +// iterateStringReverse iterates through the given string in reverse, starting +// from the end of the string, one printed character at a time. For each such +// character, the callback function is called with the Unicode code points of +// the character (the first rune and any combining runes which may be nil if +// there aren't any), the starting position (in bytes) within the original +// string, its length in bytes, the screen position of the character, and the +// screen width of it. The iteration stops if the callback returns true. This +// function returns true if the iteration was stopped before the last character. +func iterateStringReverse(text string, callback func(main rune, comb []rune, textPos, textWidth, screenPos, screenWidth int) bool) bool { + type runePos struct { + r rune + pos int // The byte position of the rune in the original string. 
+ width int // The screen width of the rune. + mod bool // Modifier or zero-width-joiner. + } + + // We use the following: + // len(text) >= number of runes in text. + + // Put all runes into a runePos slice in reverse. + runesReverse := make([]runePos, len(text)) + index := len(text) - 1 + for pos, ch := range text { + runesReverse[index].r = ch + runesReverse[index].pos = pos + runesReverse[index].width = runewidth.RuneWidth(ch) + runesReverse[index].mod = unicode.In(ch, unicode.Lm, unicode.M) || ch == '\u200d' + index-- + } + runesReverse = runesReverse[index+1:] + + // Parse reverse runes. + var screenWidth int + buffer := make([]rune, len(text)) // We fill this up from the back so it's forward again. + bufferPos := len(text) + stringWidth := runewidth.StringWidth(text) + for index, r := range runesReverse { + // Put this rune into the buffer. + bufferPos-- + buffer[bufferPos] = r.r + + // Do we need to flush the buffer? + if r.pos == 0 || !r.mod && runesReverse[index+1].r != '\u200d' { + // Yes, invoke callback. 
+ var comb []rune + if len(text)-bufferPos > 1 { + comb = buffer[bufferPos+1:] + } + if callback(r.r, comb, r.pos, len(text)-r.pos, stringWidth-screenWidth, r.width) { + return true + } + screenWidth += r.width + bufferPos = len(text) + } + } + + return false +} diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE b/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE new file mode 100644 index 00000000000..c35c17af980 --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2015 Dmitri Shuralyov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go new file mode 100644 index 00000000000..6a77d124317 --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go @@ -0,0 +1,29 @@ +// Package sanitized_anchor_name provides a func to create sanitized anchor names. 
+// +// Its logic can be reused by multiple packages to create interoperable anchor names +// and links to those anchors. +// +// At this time, it does not try to ensure that generated anchor names +// are unique, that responsibility falls on the caller. +package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name" + +import "unicode" + +// Create returns a sanitized anchor name for the given text. +func Create(text string) string { + var anchorName []rune + var futureDash = false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go new file mode 100644 index 00000000000..221f175c01e --- /dev/null +++ b/vendor/golang.org/x/text/encoding/encoding.go @@ -0,0 +1,335 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding defines an interface for character encodings, such as Shift +// JIS and Windows 1252, that can convert to and from UTF-8. +// +// Encoding implementations are provided in other packages, such as +// golang.org/x/text/encoding/charmap and +// golang.org/x/text/encoding/japanese. +package encoding // import "golang.org/x/text/encoding" + +import ( + "errors" + "io" + "strconv" + "unicode/utf8" + + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// TODO: +// - There seems to be some inconsistency in when decoders return errors +// and when not. Also documentation seems to suggest they shouldn't return +// errors at all (except for UTF-16). 
+// - Encoders seem to rely on or at least benefit from the input being in NFC +// normal form. Perhaps add an example how users could prepare their output. + +// Encoding is a character set encoding that can be transformed to and from +// UTF-8. +type Encoding interface { + // NewDecoder returns a Decoder. + NewDecoder() *Decoder + + // NewEncoder returns an Encoder. + NewEncoder() *Encoder +} + +// A Decoder converts bytes to UTF-8. It implements transform.Transformer. +// +// Transforming source bytes that are not of that encoding will not result in an +// error per se. Each byte that cannot be transcoded will be represented in the +// output by the UTF-8 encoding of '\uFFFD', the replacement rune. +type Decoder struct { + transform.Transformer + + // This forces external creators of Decoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. + _ struct{} +} + +// Bytes converts the given encoded bytes to UTF-8. It returns the converted +// bytes or nil, err if any error occurred. +func (d *Decoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(d, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts the given encoded string to UTF-8. It returns the converted +// string or "", err if any error occurred. +func (d *Decoder) String(s string) (string, error) { + s, _, err := transform.String(d, s) + if err != nil { + return "", err + } + return s, nil +} + +// Reader wraps another Reader to decode its bytes. +// +// The Decoder may not be used for any other operation as long as the returned +// Reader is in use. +func (d *Decoder) Reader(r io.Reader) io.Reader { + return transform.NewReader(r, d) +} + +// An Encoder converts bytes from UTF-8. It implements transform.Transformer. +// +// Each rune that cannot be transcoded will result in an error. In this case, +// the transform will consume all source byte up to, not including the offending +// rune. 
Transforming source bytes that are not valid UTF-8 will be replaced by +// `\uFFFD`. To return early with an error instead, use transform.Chain to +// preprocess the data with a UTF8Validator. +type Encoder struct { + transform.Transformer + + // This forces external creators of Encoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. + _ struct{} +} + +// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if +// any error occurred. +func (e *Encoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(e, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts a string from UTF-8. It returns the converted string or +// "", err if any error occurred. +func (e *Encoder) String(s string) (string, error) { + s, _, err := transform.String(e, s) + if err != nil { + return "", err + } + return s, nil +} + +// Writer wraps another Writer to encode its UTF-8 output. +// +// The Encoder may not be used for any other operation as long as the returned +// Writer is in use. +func (e *Encoder) Writer(w io.Writer) io.Writer { + return transform.NewWriter(w, e) +} + +// ASCIISub is the ASCII substitute character, as recommended by +// http://unicode.org/reports/tr36/#Text_Comparison +const ASCIISub = '\x1a' + +// Nop is the nop encoding. Its transformed bytes are the same as the source +// bytes; it does not replace invalid UTF-8 sequences. +var Nop Encoding = nop{} + +type nop struct{} + +func (nop) NewDecoder() *Decoder { + return &Decoder{Transformer: transform.Nop} +} +func (nop) NewEncoder() *Encoder { + return &Encoder{Transformer: transform.Nop} +} + +// Replacement is the replacement encoding. Decoding from the replacement +// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to +// the replacement encoding yields the same as the source bytes except that +// invalid UTF-8 is converted to '\uFFFD'. 
+// +// It is defined at http://encoding.spec.whatwg.org/#replacement +var Replacement Encoding = replacement{} + +type replacement struct{} + +func (replacement) NewDecoder() *Decoder { + return &Decoder{Transformer: replacementDecoder{}} +} + +func (replacement) NewEncoder() *Encoder { + return &Encoder{Transformer: replacementEncoder{}} +} + +func (replacement) ID() (mib identifier.MIB, other string) { + return identifier.Replacement, "" +} + +type replacementDecoder struct{ transform.NopResetter } + +func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(dst) < 3 { + return 0, 0, transform.ErrShortDst + } + if atEOF { + const fffd = "\ufffd" + dst[0] = fffd[0] + dst[1] = fffd[1] + dst[2] = fffd[2] + nDst = 3 + } + return nDst, len(src), nil +} + +type replacementEncoder struct{ transform.NopResetter } + +func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + r, size := rune(0), 0 + + for ; nSrc < len(src); nSrc += size { + r = rune(src[nSrc]) + + // Decode a 1-byte rune. + if r < utf8.RuneSelf { + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + r = '\ufffd' + } + } + + if nDst+utf8.RuneLen(r) > len(dst) { + err = transform.ErrShortDst + break + } + nDst += utf8.EncodeRune(dst[nDst:], r) + } + return nDst, nSrc, err +} + +// HTMLEscapeUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with HTML escape sequences. +// +// This wrapper exists to comply to URL and HTML forms requiring a +// non-terminating legacy encoder. The produced sequences may lead to data +// loss as they are indistinguishable from legitimate input. 
To avoid this +// issue, use UTF-8 encodings whenever possible. +func HTMLEscapeUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToHTML}} +} + +// ReplaceUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with an encoding-specific +// replacement. +// +// This wrapper is only provided for backwards compatibility and legacy +// handling. Its use is strongly discouraged. Use UTF-8 whenever possible. +func ReplaceUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToReplacement}} +} + +type errorHandler struct { + *Encoder + handler func(dst []byte, r rune, err repertoireError) (n int, ok bool) +} + +// TODO: consider making this error public in some form. +type repertoireError interface { + Replacement() byte +} + +func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) + for err != nil { + rerr, ok := err.(repertoireError) + if !ok { + return nDst, nSrc, err + } + r, sz := utf8.DecodeRune(src[nSrc:]) + n, ok := h.handler(dst[nDst:], r, rerr) + if !ok { + return nDst, nSrc, transform.ErrShortDst + } + err = nil + nDst += n + if nSrc += sz; nSrc < len(src) { + var dn, sn int + dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) + nDst += dn + nSrc += sn + } + } + return nDst, nSrc, err +} + +func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { + buf := [8]byte{} + b := strconv.AppendUint(buf[:0], uint64(r), 10) + if n = len(b) + len("&#;"); n >= len(dst) { + return 0, false + } + dst[0] = '&' + dst[1] = '#' + dst[copy(dst[2:], b)+2] = ';' + return n, true +} + +func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { + if len(dst) == 0 { + return 0, false + } + dst[0] = err.Replacement() + return 1, true +} + +// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. 
+var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") + +// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first +// input byte that is not valid UTF-8. +var UTF8Validator transform.Transformer = utf8Validator{} + +type utf8Validator struct{ transform.NopResetter } + +func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := len(src) + if n > len(dst) { + n = len(dst) + } + for i := 0; i < n; { + if c := src[i]; c < utf8.RuneSelf { + dst[i] = c + i++ + continue + } + _, size := utf8.DecodeRune(src[i:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + err = ErrInvalidUTF8 + if !atEOF && !utf8.FullRune(src[i:]) { + err = transform.ErrShortSrc + } + return i, i, err + } + if i+size > len(dst) { + return i, i, transform.ErrShortDst + } + for ; size > 0; size-- { + dst[i] = src[i] + i++ + } + } + if len(src) > len(dst) { + err = transform.ErrShortDst + } + return n, n, err +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go new file mode 100644 index 00000000000..0c8eba7e526 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "log" + "strings" + + "golang.org/x/text/internal/gen" +) + +type registry struct { + XMLName xml.Name `xml:"registry"` + Updated string `xml:"updated"` + Registry []struct { + ID string `xml:"id,attr"` + Record []struct { + Name string `xml:"name"` + Xref []struct { + Type string `xml:"type,attr"` + Data string `xml:"data,attr"` + } `xml:"xref"` + Desc struct { + Data string `xml:",innerxml"` + // Any []struct { + // Data string `xml:",chardata"` + // } `xml:",any"` + // Data string `xml:",chardata"` + } `xml:"description,"` + MIB string `xml:"value"` + Alias []string `xml:"alias"` + MIME string `xml:"preferred_alias"` + } `xml:"record"` + } `xml:"registry"` +} + +func main() { + r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") + reg := ®istry{} + if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { + log.Fatalf("Error decoding charset registry: %v", err) + } + if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { + log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) + } + + w := &bytes.Buffer{} + fmt.Fprintf(w, "const (\n") + for _, rec := range reg.Registry[0].Record { + constName := "" + for _, a := range rec.Alias { + if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { + // Some of the constant definitions have comments in them. Strip those. + constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) + } + } + if constName == "" { + switch rec.MIB { + case "2085": + constName = "HZGB2312" // Not listed as alias for some reason. 
+ default: + log.Fatalf("No cs alias defined for %s.", rec.MIB) + } + } + if rec.MIME != "" { + rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) + } + fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) + if len(rec.Desc.Data) > 0 { + fmt.Fprint(w, "// ") + d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) + inElem := true + attr := "" + for { + t, err := d.Token() + if err != nil { + if err != io.EOF { + log.Fatal(err) + } + break + } + switch x := t.(type) { + case xml.CharData: + attr = "" // Don't need attribute info. + a := bytes.Split([]byte(x), []byte("\n")) + for i, b := range a { + if b = bytes.TrimSpace(b); len(b) != 0 { + if !inElem && i > 0 { + fmt.Fprint(w, "\n// ") + } + inElem = false + fmt.Fprintf(w, "%s ", string(b)) + } + } + case xml.StartElement: + if x.Name.Local == "xref" { + inElem = true + use := false + for _, a := range x.Attr { + if a.Name.Local == "type" { + use = use || a.Value != "person" + } + if a.Name.Local == "data" && use { + attr = a.Value + " " + } + } + } + case xml.EndElement: + inElem = false + fmt.Fprint(w, attr) + } + } + fmt.Fprint(w, "\n") + } + for _, x := range rec.Xref { + switch x.Type { + case "rfc": + fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) + case "uri": + fmt.Fprintf(w, "// Reference: %s\n", x.Data) + } + } + fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) + fmt.Fprintln(w) + } + fmt.Fprintln(w, ")") + + gen.WriteGoFile("mib.go", "identifier", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go new file mode 100644 index 00000000000..7351b4ef8af --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run gen.go + +// Package identifier defines the contract between implementations of Encoding +// and Index by defining identifiers that uniquely identify standardized coded +// character sets (CCS) and character encoding schemes (CES), which we will +// together refer to as encodings, for which Encoding implementations provide +// converters to and from UTF-8. This package is typically only of concern to +// implementers of Indexes and Encodings. +// +// One part of the identifier is the MIB code, which is defined by IANA and +// uniquely identifies a CCS or CES. Each code is associated with data that +// references authorities, official documentation as well as aliases and MIME +// names. +// +// Not all CESs are covered by the IANA registry. The "other" string that is +// returned by ID can be used to identify other character sets or versions of +// existing ones. +// +// It is recommended that each package that provides a set of Encodings provide +// the All and Common variables to reference all supported encodings and +// commonly used subset. This allows Index implementations to include all +// available encodings without explicitly referencing or knowing about them. +package identifier + +// Note: this package is internal, but could be made public if there is a need +// for writing third-party Indexes and Encodings. + +// References: +// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt +// - http://www.iana.org/assignments/character-sets/character-sets.xhtml +// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib +// - http://www.ietf.org/rfc/rfc2978.txt +// - http://www.unicode.org/reports/tr22/ +// - http://www.w3.org/TR/encoding/ +// - https://encoding.spec.whatwg.org/ +// - https://encoding.spec.whatwg.org/encodings.json +// - https://tools.ietf.org/html/rfc6657#section-5 + +// Interface can be implemented by Encodings to define the CCS or CES for which +// it implements conversions. 
+type Interface interface { + // ID returns an encoding identifier. Exactly one of the mib and other + // values should be non-zero. + // + // In the usual case it is only necessary to indicate the MIB code. The + // other string can be used to specify encodings for which there is no MIB, + // such as "x-mac-dingbat". + // + // The other string may only contain the characters a-z, A-Z, 0-9, - and _. + ID() (mib MIB, other string) + + // NOTE: the restrictions on the encoding are to allow extending the syntax + // with additional information such as versions, vendors and other variants. +} + +// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds +// some identifiers for some encodings that are not covered by the IANA +// standard. +// +// See http://www.iana.org/assignments/ianacharset-mib. +type MIB uint16 + +// These additional MIB types are not defined in IANA. They are added because +// they are common and defined within the text repo. +const ( + // Unofficial marks the start of encodings not registered by IANA. + Unofficial MIB = 10000 + iota + + // Replacement is the WhatWG replacement encoding. + Replacement + + // XUserDefined is the code for x-user-defined. + XUserDefined + + // MacintoshCyrillic is the code for x-mac-cyrillic. + MacintoshCyrillic +) diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go new file mode 100644 index 00000000000..768842b0a5a --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -0,0 +1,1621 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package identifier + +const ( + // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). + // + // ANSI X3.4-1986 + // Reference: RFC2046 + ASCII MIB = 3 + + // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin1 MIB = 4 + + // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin2 MIB = 5 + + // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin3 MIB = 6 + + // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin4 MIB = 7 + + // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinCyrillic MIB = 8 + + // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinArabic MIB = 9 + + // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1947 + // Reference: RFC1345 + ISOLatinGreek MIB = 10 + + // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinHebrew MIB = 11 + + // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin5 MIB = 12 + + // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin6 MIB = 13 + + // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. + // + // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOTextComm MIB = 14 + + // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. + // + // JIS X 0201-1976. One byte only, this is equivalent to + // JIS/Roman (similar to ASCII) plus eight-bit half-width + // Katakana + // Reference: RFC1345 + HalfWidthKatakana MIB = 15 + + // JISEncoding is the MIB identifier with IANA name JIS_Encoding. + // + // JIS X 0202-1991. Uses ISO 2022 escape sequences to + // shift code sets as documented in JIS X 0202-1991. + JISEncoding MIB = 16 + + // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). + // + // This charset is an extension of csHalfWidthKatakana by + // adding graphic characters in JIS X 0208. The CCS's are + // JIS X0201:1997 and JIS X0208:1997. The + // complete definition is shown in Appendix 1 of JIS + // X0208:1997. + // This charset can be used for the top-level media type "text". + ShiftJIS MIB = 17 + + // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). 
+ // + // Standardized by OSF, UNIX International, and UNIX Systems + // Laboratories Pacific. Uses ISO 2022 rules to select + // code set 0: US-ASCII (a single 7-bit byte set) + // code set 1: JIS X0208-1990 (a double 8-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // requiring SS2 as the character prefix + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // requiring SS3 as the character prefix + EUCPkdFmtJapanese MIB = 18 + + // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. + // + // Used in Japan. Each character is 2 octets. + // code set 0: US-ASCII (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = 20-7E + // code set 1: JIS X0208-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = A0-FF + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in + // the first byte + // and 21-7E in the second byte + EUCFixWidJapanese MIB = 19 + + // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO4UnitedKingdom MIB = 20 + + // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO11SwedishForNames MIB = 21 + + // ISO15Italian is the MIB identifier with IANA name IT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO15Italian MIB = 22 + + // ISO17Spanish is the MIB identifier with IANA name ES. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO17Spanish MIB = 23 + + // ISO21German is the MIB identifier with IANA name DIN_66003. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO21German MIB = 24 + + // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO60Norwegian1 MIB = 25 + + // ISO69French is the MIB identifier with IANA name NF_Z_62-010. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO69French MIB = 26 + + // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. + // + // Universal Transfer Format (1), this is the multibyte + // encoding, that subsets ASCII-7. It does not have byte + // ordering issues. + ISO10646UTF1 MIB = 27 + + // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO646basic1983 MIB = 28 + + // INVARIANT is the MIB identifier with IANA name INVARIANT. + // + // Reference: RFC1345 + INVARIANT MIB = 29 + + // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2IntlRefVersion MIB = 30 + + // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + NATSSEFI MIB = 31 + + // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFIADD MIB = 32 + + // NATSDANO is the MIB identifier with IANA name NATS-DANO. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANO MIB = 33 + + // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANOADD MIB = 34 + + // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10Swedish MIB = 35 + + // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + KSC56011987 MIB = 36 + + // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). + // + // rfc1557 (see also KS_C_5601-1987) + // Reference: RFC1557 + ISO2022KR MIB = 37 + + // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). + // + // rfc1557 (see also KS_C_5861-1992) + // Reference: RFC1557 + EUCKR MIB = 38 + + // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). + // + // rfc1468 (see also rfc2237 ) + // Reference: RFC1468 + ISO2022JP MIB = 39 + + // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). 
+ // + // rfc1554 + // Reference: RFC1554 + ISO2022JP2 MIB = 40 + + // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO13JISC6220jp MIB = 41 + + // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO14JISC6220ro MIB = 42 + + // ISO16Portuguese is the MIB identifier with IANA name PT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO16Portuguese MIB = 43 + + // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO18Greek7Old MIB = 44 + + // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO19LatinGreek MIB = 45 + + // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO25French MIB = 46 + + // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO27LatinGreek1 MIB = 47 + + // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5427Cyrillic MIB = 48 + + // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO42JISC62261978 MIB = 49 + + // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO47BSViewdata MIB = 50 + + // ISO49INIS is the MIB identifier with IANA name INIS. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO49INIS MIB = 51 + + // ISO50INIS8 is the MIB identifier with IANA name INIS-8. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO50INIS8 MIB = 52 + + // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO51INISCyrillic MIB = 53 + + // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO54271981 MIB = 54 + + // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5428Greek MIB = 55 + + // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO57GB1988 MIB = 56 + + // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO58GB231280 MIB = 57 + + // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO61Norwegian2 MIB = 58 + + // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO70VideotexSupp1 MIB = 59 + + // ISO84Portuguese2 is the MIB identifier with IANA name PT2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO84Portuguese2 MIB = 60 + + // ISO85Spanish2 is the MIB identifier with IANA name ES2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO85Spanish2 MIB = 61 + + // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO86Hungarian MIB = 62 + + // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO87JISX0208 MIB = 63 + + // ISO88Greek7 is the MIB identifier with IANA name greek7. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO88Greek7 MIB = 64 + + // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO89ASMO449 MIB = 65 + + // ISO90 is the MIB identifier with IANA name iso-ir-90. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO90 MIB = 66 + + // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO91JISC62291984a MIB = 67 + + // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO92JISC62991984b MIB = 68 + + // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO93JIS62291984badd MIB = 69 + + // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO94JIS62291984hand MIB = 70 + + // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO95JIS62291984handadd MIB = 71 + + // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO96JISC62291984kana MIB = 72 + + // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2033 MIB = 73 + + // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO99NAPLPS MIB = 74 + + // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO102T617bit MIB = 75 + + // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO103T618bit MIB = 76 + + // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. + // + // ISO registry + // (formerly ECMA + // registry ) + ISO111ECMACyrillic MIB = 77 + + // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO121Canadian1 MIB = 78 + + // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO122Canadian2 MIB = 79 + + // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO123CSAZ24341985gr MIB = 80 + + // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88596E MIB = 81 + + // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88596I MIB = 82 + + // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO128T101G2 MIB = 83 + + // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88598E MIB = 84 + + // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88598I MIB = 85 + + // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO139CSN369103 MIB = 86 + + // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO141JUSIB1002 MIB = 87 + + // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO143IECP271 MIB = 88 + + // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO146Serbian MIB = 89 + + // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO147Macedonian MIB = 90 + + // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO150GreekCCITT MIB = 91 + + // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO151Cuba MIB = 92 + + // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO6937Add MIB = 93 + + // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO153GOST1976874 MIB = 94 + + // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO8859Supp MIB = 95 + + // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10367Box MIB = 96 + + // ISO158Lap is the MIB identifier with IANA name latin-lap. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO158Lap MIB = 97 + + // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO159JISX02121990 MIB = 98 + + // ISO646Danish is the MIB identifier with IANA name DS_2089. + // + // Danish Standard, DS 2089, February 1974 + // Reference: RFC1345 + ISO646Danish MIB = 99 + + // USDK is the MIB identifier with IANA name us-dk. + // + // Reference: RFC1345 + USDK MIB = 100 + + // DKUS is the MIB identifier with IANA name dk-us. + // + // Reference: RFC1345 + DKUS MIB = 101 + + // KSC5636 is the MIB identifier with IANA name KSC5636. + // + // Reference: RFC1345 + KSC5636 MIB = 102 + + // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. + // + // rfc1642 + // Reference: RFC1642 + Unicode11UTF7 MIB = 103 + + // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CN MIB = 104 + + // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CNEXT MIB = 105 + + // UTF8 is the MIB identifier with IANA name UTF-8. + // + // rfc3629 + // Reference: RFC3629 + UTF8 MIB = 106 + + // ISO885913 is the MIB identifier with IANA name ISO-8859-13. + // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13 http://www.iana.org/assignments/charset-reg/ISO-8859-13 + ISO885913 MIB = 109 + + // ISO885914 is the MIB identifier with IANA name ISO-8859-14. 
+ // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14 + ISO885914 MIB = 110 + + // ISO885915 is the MIB identifier with IANA name ISO-8859-15. + // + // ISO + // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15 + ISO885915 MIB = 111 + + // ISO885916 is the MIB identifier with IANA name ISO-8859-16. + // + // ISO + ISO885916 MIB = 112 + + // GBK is the MIB identifier with IANA name GBK. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GBK + GBK MIB = 113 + + // GB18030 is the MIB identifier with IANA name GB18030. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GB18030 + GB18030 MIB = 114 + + // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 + OSDEBCDICDF0415 MIB = 115 + + // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV + OSDEBCDICDF03IRV MIB = 116 + + // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 + OSDEBCDICDF041 MIB = 117 + + // ISO115481 is the MIB identifier with IANA name ISO-11548-1. + // + // See http://www.iana.org/assignments/charset-reg/ISO-11548-1 + ISO115481 MIB = 118 + + // KZ1048 is the MIB identifier with IANA name KZ-1048. + // + // See http://www.iana.org/assignments/charset-reg/KZ-1048 + KZ1048 MIB = 119 + + // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. 
+ // + // the 2-octet Basic Multilingual Plane, aka Unicode + // this needs to specify network byte order: the standard + // does not specify (it is a 16-bit integer space) + Unicode MIB = 1000 + + // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. + // + // the full code space. (same comment about byte order, + // these are 31-bit numbers. + UCS4 MIB = 1001 + + // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. + // + // ASCII subset of Unicode. Basic Latin = collection 1 + // See ISO 10646, Appendix A + UnicodeASCII MIB = 1002 + + // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. + // + // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 + // Supplement = collections 1 and 2. See ISO 10646, + // Appendix A. See rfc1815 . + UnicodeLatin1 MIB = 1003 + + // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. + // + // ISO 10646 Japanese, see rfc1815 . + UnicodeJapanese MIB = 1004 + + // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. + // + // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 + UnicodeIBM1261 MIB = 1005 + + // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. + // + // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 + UnicodeIBM1268 MIB = 1006 + + // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. + // + // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 + UnicodeIBM1276 MIB = 1007 + + // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. + // + // IBM Arabic Presentation Set, GCSGID: 1264 + UnicodeIBM1264 MIB = 1008 + + // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. + // + // IBM Hebrew Presentation Set, GCSGID: 1265 + UnicodeIBM1265 MIB = 1009 + + // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. 
+ // + // rfc1641 + // Reference: RFC1641 + Unicode11 MIB = 1010 + + // SCSU is the MIB identifier with IANA name SCSU. + // + // SCSU See http://www.iana.org/assignments/charset-reg/SCSU + SCSU MIB = 1011 + + // UTF7 is the MIB identifier with IANA name UTF-7. + // + // rfc2152 + // Reference: RFC2152 + UTF7 MIB = 1012 + + // UTF16BE is the MIB identifier with IANA name UTF-16BE. + // + // rfc2781 + // Reference: RFC2781 + UTF16BE MIB = 1013 + + // UTF16LE is the MIB identifier with IANA name UTF-16LE. + // + // rfc2781 + // Reference: RFC2781 + UTF16LE MIB = 1014 + + // UTF16 is the MIB identifier with IANA name UTF-16. + // + // rfc2781 + // Reference: RFC2781 + UTF16 MIB = 1015 + + // CESU8 is the MIB identifier with IANA name CESU-8. + // + // http://www.unicode.org/unicode/reports/tr26 + CESU8 MIB = 1016 + + // UTF32 is the MIB identifier with IANA name UTF-32. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32 MIB = 1017 + + // UTF32BE is the MIB identifier with IANA name UTF-32BE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32BE MIB = 1018 + + // UTF32LE is the MIB identifier with IANA name UTF-32LE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32LE MIB = 1019 + + // BOCU1 is the MIB identifier with IANA name BOCU-1. + // + // http://www.unicode.org/notes/tn6/ + BOCU1 MIB = 1020 + + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.0. + // PCL Symbol Set id: 9U + Windows30Latin1 MIB = 2000 + + // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.1. + // PCL Symbol Set id: 19U + Windows31Latin1 MIB = 2001 + + // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. + // + // Extended ISO 8859-2. Latin-2 for Windows 3.1. 
+ // PCL Symbol Set id: 9E + Windows31Latin2 MIB = 2002 + + // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. + // + // Extended ISO 8859-9. Latin-5 for Windows 3.1 + // PCL Symbol Set id: 5T + Windows31Latin5 MIB = 2003 + + // HPRoman8 is the MIB identifier with IANA name hp-roman8. + // + // LaserJet IIP Printer User's Manual, + // HP part no 33471-90901, Hewlet-Packard, June 1989. + // Reference: RFC1345 + HPRoman8 MIB = 2004 + + // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 10J + AdobeStandardEncoding MIB = 2005 + + // VenturaUS is the MIB identifier with IANA name Ventura-US. + // + // Ventura US. ASCII plus characters typically used in + // publishing, like pilcrow, copyright, registered, trade mark, + // section, dagger, and double dagger in the range A0 (hex) + // to FF (hex). + // PCL Symbol Set id: 14J + VenturaUS MIB = 2006 + + // VenturaInternational is the MIB identifier with IANA name Ventura-International. + // + // Ventura International. ASCII plus coded characters similar + // to Roman8. + // PCL Symbol Set id: 13J + VenturaInternational MIB = 2007 + + // DECMCS is the MIB identifier with IANA name DEC-MCS. + // + // VAX/VMS User's Manual, + // Order Number: AI-Y517A-TE, April 1986. + // Reference: RFC1345 + DECMCS MIB = 2008 + + // PC850Multilingual is the MIB identifier with IANA name IBM850. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC850Multilingual MIB = 2009 + + // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. + // + // PC Danish Norwegian + // 8-bit PC set for Danish Norwegian + // PCL Symbol Set id: 11U + PC8DanishNorwegian MIB = 2012 + + // PC862LatinHebrew is the MIB identifier with IANA name IBM862. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC862LatinHebrew MIB = 2013 + + // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. + // + // PC Latin Turkish. PCL Symbol Set id: 9T + PC8Turkish MIB = 2014 + + // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. + // + // Presentation Set, CPGID: 259 + IBMSymbols MIB = 2015 + + // IBMThai is the MIB identifier with IANA name IBM-Thai. + // + // Presentation Set, CPGID: 838 + IBMThai MIB = 2016 + + // HPLegal is the MIB identifier with IANA name HP-Legal. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 1U + HPLegal MIB = 2017 + + // HPPiFont is the MIB identifier with IANA name HP-Pi-font. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 15U + HPPiFont MIB = 2018 + + // HPMath8 is the MIB identifier with IANA name HP-Math8. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 8M + HPMath8 MIB = 2019 + + // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 5M + HPPSMath MIB = 2020 + + // HPDesktop is the MIB identifier with IANA name HP-DeskTop. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 7J + HPDesktop MIB = 2021 + + // VenturaMath is the MIB identifier with IANA name Ventura-Math. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6M + VenturaMath MIB = 2022 + + // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. 
+ // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6J + MicrosoftPublishing MIB = 2023 + + // Windows31J is the MIB identifier with IANA name Windows-31J. + // + // Windows Japanese. A further extension of Shift_JIS + // to include NEC special characters (Row 13), NEC + // selection of IBM extensions (Rows 89 to 92), and IBM + // extensions (Rows 115 to 119). The CCS's are + // JIS X0201:1997, JIS X0208:1997, and these extensions. + // This charset can be used for the top-level media type "text", + // but it is of limited or specialized use (see rfc2278 ). + // PCL Symbol Set id: 19K + Windows31J MIB = 2024 + + // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). + // + // Chinese for People's Republic of China (PRC) mixed one byte, + // two byte set: + // 20-7E = one byte ASCII + // A1-FE = two byte PRC Kanji + // See GB 2312-80 + // PCL Symbol Set Id: 18C + GB2312 MIB = 2025 + + // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). + // + // Chinese for Taiwan Multi-byte set. + // PCL Symbol Set Id: 18T + Big5 MIB = 2026 + + // Macintosh is the MIB identifier with IANA name macintosh. + // + // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 + // Reference: RFC1345 + Macintosh MIB = 2027 + + // IBM037 is the MIB identifier with IANA name IBM037. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM037 MIB = 2028 + + // IBM038 is the MIB identifier with IANA name IBM038. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM038 MIB = 2029 + + // IBM273 is the MIB identifier with IANA name IBM273. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM273 MIB = 2030 + + // IBM274 is the MIB identifier with IANA name IBM274. 
+ // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM274 MIB = 2031 + + // IBM275 is the MIB identifier with IANA name IBM275. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM275 MIB = 2032 + + // IBM277 is the MIB identifier with IANA name IBM277. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM277 MIB = 2033 + + // IBM278 is the MIB identifier with IANA name IBM278. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM278 MIB = 2034 + + // IBM280 is the MIB identifier with IANA name IBM280. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM280 MIB = 2035 + + // IBM281 is the MIB identifier with IANA name IBM281. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM281 MIB = 2036 + + // IBM284 is the MIB identifier with IANA name IBM284. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM284 MIB = 2037 + + // IBM285 is the MIB identifier with IANA name IBM285. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM285 MIB = 2038 + + // IBM290 is the MIB identifier with IANA name IBM290. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM290 MIB = 2039 + + // IBM297 is the MIB identifier with IANA name IBM297. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM297 MIB = 2040 + + // IBM420 is the MIB identifier with IANA name IBM420. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990, + // IBM NLS RM p 11-11 + // Reference: RFC1345 + IBM420 MIB = 2041 + + // IBM423 is the MIB identifier with IANA name IBM423. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM423 MIB = 2042 + + // IBM424 is the MIB identifier with IANA name IBM424. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM424 MIB = 2043 + + // PC8CodePage437 is the MIB identifier with IANA name IBM437. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC8CodePage437 MIB = 2011 + + // IBM500 is the MIB identifier with IANA name IBM500. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM500 MIB = 2044 + + // IBM851 is the MIB identifier with IANA name IBM851. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM851 MIB = 2045 + + // PCp852 is the MIB identifier with IANA name IBM852. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PCp852 MIB = 2010 + + // IBM855 is the MIB identifier with IANA name IBM855. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM855 MIB = 2046 + + // IBM857 is the MIB identifier with IANA name IBM857. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM857 MIB = 2047 + + // IBM860 is the MIB identifier with IANA name IBM860. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM860 MIB = 2048 + + // IBM861 is the MIB identifier with IANA name IBM861. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM861 MIB = 2049 + + // IBM863 is the MIB identifier with IANA name IBM863. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM863 MIB = 2050 + + // IBM864 is the MIB identifier with IANA name IBM864. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM864 MIB = 2051 + + // IBM865 is the MIB identifier with IANA name IBM865. + // + // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) + // Reference: RFC1345 + IBM865 MIB = 2052 + + // IBM868 is the MIB identifier with IANA name IBM868. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM868 MIB = 2053 + + // IBM869 is the MIB identifier with IANA name IBM869. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM869 MIB = 2054 + + // IBM870 is the MIB identifier with IANA name IBM870. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM870 MIB = 2055 + + // IBM871 is the MIB identifier with IANA name IBM871. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM871 MIB = 2056 + + // IBM880 is the MIB identifier with IANA name IBM880. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM880 MIB = 2057 + + // IBM891 is the MIB identifier with IANA name IBM891. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM891 MIB = 2058 + + // IBM903 is the MIB identifier with IANA name IBM903. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM903 MIB = 2059 + + // IBBM904 is the MIB identifier with IANA name IBM904. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBBM904 MIB = 2060 + + // IBM905 is the MIB identifier with IANA name IBM905. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM905 MIB = 2061 + + // IBM918 is the MIB identifier with IANA name IBM918. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM918 MIB = 2062 + + // IBM1026 is the MIB identifier with IANA name IBM1026. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM1026 MIB = 2063 + + // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + IBMEBCDICATDE MIB = 2064 + + // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. 
+ // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICATDEA MIB = 2065 + + // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICCAFR MIB = 2066 + + // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNO MIB = 2067 + + // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNOA MIB = 2068 + + // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISE MIB = 2069 + + // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISEA MIB = 2070 + + // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFR MIB = 2071 + + // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICIT MIB = 2072 + + // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICPT MIB = 2073 + + // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICES MIB = 2074 + + // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESA MIB = 2075 + + // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. 
+ // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESS MIB = 2076 + + // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUK MIB = 2077 + + // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUS MIB = 2078 + + // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. + // + // Reference: RFC1428 + Unknown8BiT MIB = 2079 + + // Mnemonic is the MIB identifier with IANA name MNEMONIC. + // + // rfc1345 , also known as "mnemonic+ascii+38" + // Reference: RFC1345 + Mnemonic MIB = 2080 + + // Mnem is the MIB identifier with IANA name MNEM. + // + // rfc1345 , also known as "mnemonic+ascii+8200" + // Reference: RFC1345 + Mnem MIB = 2081 + + // VISCII is the MIB identifier with IANA name VISCII. + // + // rfc1456 + // Reference: RFC1456 + VISCII MIB = 2082 + + // VIQR is the MIB identifier with IANA name VIQR. + // + // rfc1456 + // Reference: RFC1456 + VIQR MIB = 2083 + + // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). + // + // rfc1489 , based on GOST-19768-74, ISO-6937/8, + // INIS-Cyrillic, ISO-5427. + // Reference: RFC1489 + KOI8R MIB = 2084 + + // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. + // + // rfc1842 , rfc1843 rfc1843 rfc1842 + HZGB2312 MIB = 2085 + + // IBM866 is the MIB identifier with IANA name IBM866. + // + // IBM NLDG Volume 2 (SE09-8002-03) August 1994 + IBM866 MIB = 2086 + + // PC775Baltic is the MIB identifier with IANA name IBM775. + // + // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 + PC775Baltic MIB = 2087 + + // KOI8U is the MIB identifier with IANA name KOI8-U. + // + // rfc2319 + // Reference: RFC2319 + KOI8U MIB = 2088 + + // IBM00858 is the MIB identifier with IANA name IBM00858. 
+ // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00858 + IBM00858 MIB = 2089 + + // IBM00924 is the MIB identifier with IANA name IBM00924. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00924 + IBM00924 MIB = 2090 + + // IBM01140 is the MIB identifier with IANA name IBM01140. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01140 + IBM01140 MIB = 2091 + + // IBM01141 is the MIB identifier with IANA name IBM01141. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01141 + IBM01141 MIB = 2092 + + // IBM01142 is the MIB identifier with IANA name IBM01142. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01142 + IBM01142 MIB = 2093 + + // IBM01143 is the MIB identifier with IANA name IBM01143. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01143 + IBM01143 MIB = 2094 + + // IBM01144 is the MIB identifier with IANA name IBM01144. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01144 + IBM01144 MIB = 2095 + + // IBM01145 is the MIB identifier with IANA name IBM01145. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01145 + IBM01145 MIB = 2096 + + // IBM01146 is the MIB identifier with IANA name IBM01146. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01146 + IBM01146 MIB = 2097 + + // IBM01147 is the MIB identifier with IANA name IBM01147. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01147 + IBM01147 MIB = 2098 + + // IBM01148 is the MIB identifier with IANA name IBM01148. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01148 + IBM01148 MIB = 2099 + + // IBM01149 is the MIB identifier with IANA name IBM01149. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01149 + IBM01149 MIB = 2100 + + // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. 
+ // + // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS + Big5HKSCS MIB = 2101 + + // IBM1047 is the MIB identifier with IANA name IBM1047. + // + // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf + IBM1047 MIB = 2102 + + // PTCP154 is the MIB identifier with IANA name PTCP154. + // + // See http://www.iana.org/assignments/charset-reg/PTCP154 + PTCP154 MIB = 2103 + + // Amiga1251 is the MIB identifier with IANA name Amiga-1251. + // + // See http://www.amiga.ultranet.ru/Amiga-1251.html + Amiga1251 MIB = 2104 + + // KOI7switched is the MIB identifier with IANA name KOI7-switched. + // + // See http://www.iana.org/assignments/charset-reg/KOI7-switched + KOI7switched MIB = 2105 + + // BRF is the MIB identifier with IANA name BRF. + // + // See http://www.iana.org/assignments/charset-reg/BRF + BRF MIB = 2106 + + // TSCII is the MIB identifier with IANA name TSCII. + // + // See http://www.iana.org/assignments/charset-reg/TSCII + TSCII MIB = 2107 + + // CP51932 is the MIB identifier with IANA name CP51932. + // + // See http://www.iana.org/assignments/charset-reg/CP51932 + CP51932 MIB = 2108 + + // Windows874 is the MIB identifier with IANA name windows-874. + // + // See http://www.iana.org/assignments/charset-reg/windows-874 + Windows874 MIB = 2109 + + // Windows1250 is the MIB identifier with IANA name windows-1250. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250 + Windows1250 MIB = 2250 + + // Windows1251 is the MIB identifier with IANA name windows-1251. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251 + Windows1251 MIB = 2251 + + // Windows1252 is the MIB identifier with IANA name windows-1252. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252 + Windows1252 MIB = 2252 + + // Windows1253 is the MIB identifier with IANA name windows-1253. 
+ // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253 + Windows1253 MIB = 2253 + + // Windows1254 is the MIB identifier with IANA name windows-1254. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254 + Windows1254 MIB = 2254 + + // Windows1255 is the MIB identifier with IANA name windows-1255. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255 + Windows1255 MIB = 2255 + + // Windows1256 is the MIB identifier with IANA name windows-1256. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256 + Windows1256 MIB = 2256 + + // Windows1257 is the MIB identifier with IANA name windows-1257. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257 + Windows1257 MIB = 2257 + + // Windows1258 is the MIB identifier with IANA name windows-1258. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258 + Windows1258 MIB = 2258 + + // TIS620 is the MIB identifier with IANA name TIS-620. + // + // Thai Industrial Standards Institute (TISI) + TIS620 MIB = 2259 + + // CP50220 is the MIB identifier with IANA name CP50220. + // + // See http://www.iana.org/assignments/charset-reg/CP50220 + CP50220 MIB = 2260 +) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..c2f2c7729d0 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,327 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/health/v1/health.proto + +package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{0} +} +func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) +} +func (m 
*HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) +} +func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(dst, src) +} +func (m *HealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_HealthCheckRequest.Size(m) +} +func (m *HealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_health_6b1a06aa67f91efd, []int{1} +} +func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) +} +func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) +} +func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckResponse.Merge(dst, src) +} +func (m *HealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_HealthCheckResponse.Size(m) +} +func (m *HealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckResponse 
proto.InternalMessageInfo + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. 
+ // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. 
+ Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, 
&healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} + +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) } + +var fileDescriptor_health_6b1a06aa67f91efd = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, + 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, + 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, + 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, + 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, + 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, + 0x6b, 0x3c, 0x4c, 
0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, + 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, + 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, + 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, + 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, + 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, + 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 00000000000..10666f2d324 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. 
+package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus + updates map[string]map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, + updates: make(map[string]map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if servingStatus, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: servingStatus, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// Watch implements `service Health`. +func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthpb.Health_WatchServer) error { + service := in.Service + // update channel is used for getting service status updates. + update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) + s.mu.Lock() + // Puts the initial status to the channel. + if servingStatus, ok := s.statusMap[service]; ok { + update <- servingStatus + } else { + update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN + } + + // Registers the update channel to the correct place in the updates map. 
+ if _, ok := s.updates[service]; !ok { + s.updates[service] = make(map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) + } + s.updates[service][stream] = update + defer func() { + s.mu.Lock() + delete(s.updates[service], stream) + s.mu.Unlock() + }() + s.mu.Unlock() + for { + select { + // Status updated. Sends the up-to-date status to the client. + case servingStatus := <-update: + err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) + if err != nil { + return status.Error(codes.Canceled, "Stream has ended.") + } + // Context done. Removes the update channel from the updates map. + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + } + } +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = servingStatus + for _, update := range s.updates[service] { + // Clears previous updates, that are not sent to the client, from the channel. + // This can happen if the client is not reading and the server gets flow control limited. + select { + case <-update: + default: + } + // Puts the most recent update to the channel. + update <- servingStatus + } + s.mu.Unlock() +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/LICENSE.txt b/vendor/gopkg.in/russross/blackfriday.v2/LICENSE.txt new file mode 100644 index 00000000000..2885af3602d --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. 
Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/russross/blackfriday.v2/block.go b/vendor/gopkg.in/russross/blackfriday.v2/block.go new file mode 100644 index 00000000000..b8607474e59 --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/block.go @@ -0,0 +1,1590 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. 
+// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + + "github.com/shurcooL/sanitized_anchor_name" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
          + // ... + //
          + if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = sanitized_anchor_name.Create(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = 
level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
          tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 
+} + +// HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
          tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + 
switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && 
!isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) 
&& data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) 
int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) 
> 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } 
else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + 
+func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? + if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = sanitized_anchor_name.Create(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // 
rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/doc.go b/vendor/gopkg.in/russross/blackfriday.v2/doc.go new file mode 100644 index 00000000000..5b3fa9876ac --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/doc.go @@ -0,0 +1,18 @@ +// Package blackfriday is a markdown processor. +// +// It translates plain text with simple formatting rules into an AST, which can +// then be further processed to HTML (provided by Blackfriday itself) or other +// formats (provided by the community). 
+// +// The simplest way to invoke Blackfriday is to call the Run function. It will +// take a text input and produce a text output in HTML (or other format). +// +// A slightly more sophisticated way to use Blackfriday is to create a Markdown +// processor and to call Parse, which returns a syntax tree for the input +// document. You can leverage Blackfriday's parsing for content extraction from +// markdown documents. You can assign a custom renderer and set various options +// to the Markdown processor. +// +// If you're interested in calling Blackfriday from command line, see +// https://github.com/russross/blackfriday-tool. +package blackfriday diff --git a/vendor/gopkg.in/russross/blackfriday.v2/esc.go b/vendor/gopkg.in/russross/blackfriday.v2/esc.go new file mode 100644 index 00000000000..6385f27cb6a --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/esc.go @@ -0,0 +1,34 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/html.go b/vendor/gopkg.in/russross/blackfriday.v2/html.go new file mode 100644 index 00000000000..284c87184f7 --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/html.go @@ -0,0 +1,949 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. 
+// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// HTMLFlags control optional behavior of HTML renderer. +type HTMLFlags int + +// HTML renderer configuration options. +const ( + HTMLFlagsNone HTMLFlags = 0 + SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks + SkipImages // Skip embedded images + SkipLinks // Skip all links + Safelink // Only link to trusted protocols + NofollowLinks // Only link with rel="nofollow" + NoreferrerLinks // Only link with rel="noreferrer" + NoopenerLinks // Only link with rel="noopener" + HrefTargetBlank // Add a blank target + CompletePage // Generate a complete HTML page + UseXHTML // Generate XHTML output instead of HTML + FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source + Smartypants // Enable smart punctuation substitutions + SmartypantsFractions // Enable smart fractions (with Smartypants) + SmartypantsDashes // Enable smart dashes (with Smartypants) + SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) + SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering + SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) + TOC // Generate a table of contents +) + +var ( + htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) +) + +const ( + htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + + processingInstruction + "|" + declaration + "|" + cdata + ")" + closeTag = "]" + openTag = "<" + tagName + attribute + "*" + "\\s*/?>" + attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" + attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" + attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" + attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" + cdata = "" + declaration = "]*>" + doubleQuotedValue = "\"[^\"]*\"" + htmlComment = "|" 
+ processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior of various parts of HTML renderer. +type HTMLRendererParameters struct { + // Prepend this text to each relative URL. + AbsolutePrefix string + // Add this text to each footnote anchor, to ensure uniqueness. + FootnoteAnchorPrefix string + // Show this text inside the tag for a footnote return link, if the + // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string + // [return] is used. + FootnoteReturnLinkContents string + // If set, add this text to the front of each Heading ID, to ensure + // uniqueness. + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + // Increase heading levels: if the offset is 1,

          becomes

          etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. 
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + params.FootnoteReturnLinkContents = `[return]` + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with 
'/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory : begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if bytes.HasPrefix(link, []byte("../")) { + return true + } + + return false +} + +func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { + for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { + tmp := fmt.Sprintf("%s-%d", id, count+1) + + if _, tmpFound := r.headingIDs[tmp]; !tmpFound { + r.headingIDs[id] = count + 1 + id = tmp + } else { + id = id + "-1" + } + } + + if _, found := r.headingIDs[id]; !found { + r.headingIDs[id] = 0 + } + + return id +} + +func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { + if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { + newDest := r.AbsolutePrefix + if link[0] != '/' { + newDest += "/" + } + newDest += string(link) + return []byte(newDest) + } + return link +} + +func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { + if isRelativeLink(link) { + return attrs + } + val := []string{} + if flags&NofollowLinks != 0 { + val = append(val, "nofollow") + } + if flags&NoreferrerLinks != 0 { + val = append(val, "noreferrer") + } + if flags&NoopenerLinks != 0 { + val = append(val, "noopener") + } + if flags&HrefTargetBlank != 0 { + attrs = append(attrs, "target=\"_blank\"") + } + if len(val) == 0 { + return attrs + } + attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) + return append(attrs, attr) +} + +func isMailto(link []byte) bool { + return bytes.HasPrefix(link, []byte("mailto:")) +} + +func needSkipLink(flags HTMLFlags, dest []byte) bool { + if flags&SkipLinks != 0 { + return true + } + return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) +} + +func 
isSmartypantable(node *Node) bool { + pt := node.Parent.Type + return pt != Link && pt != CodeBlock && pt != Code +} + +func appendLanguageAttr(attrs []string, info []byte) []string { + if len(info) == 0 { + return attrs + } + endOfLang := bytes.IndexAny(info, "\t ") + if endOfLang < 0 { + endOfLang = len(info) + } + return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) +} + +func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { + w.Write(name) + if len(attrs) > 0 { + w.Write(spaceBytes) + w.Write([]byte(strings.Join(attrs, " "))) + } + w.Write(gtBytes) + r.lastOutputLen = 1 +} + +func footnoteRef(prefix string, node *Node) []byte { + urlFrag := prefix + string(slugify(node.Destination)) + anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) + return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) +} + +func footnoteItem(prefix string, slug []byte) []byte { + return []byte(fmt.Sprintf(`
        • `, prefix, slug)) +} + +func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { + const format = ` %s` + return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) +} + +func itemOpenCR(node *Node) bool { + if node.Prev == nil { + return false + } + ld := node.Parent.ListData + return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 +} + +func skipParagraphTags(node *Node) bool { + grandparent := node.Parent.Parent + if grandparent == nil || grandparent.Type != List { + return false + } + tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 + return grandparent.Type == List && tightOrTerm +} + +func cellAlignment(align CellAlignFlags) string { + switch align { + case TableAlignmentLeft: + return "left" + case TableAlignmentRight: + return "right" + case TableAlignmentCenter: + return "center" + default: + return "" + } +} + +func (r *HTMLRenderer) out(w io.Writer, text []byte) { + if r.disableTags > 0 { + w.Write(htmlTagRe.ReplaceAll(text, []byte{})) + } else { + w.Write(text) + } + r.lastOutputLen = len(text) +} + +func (r *HTMLRenderer) cr(w io.Writer) { + if r.lastOutputLen > 0 { + r.out(w, nlBytes) + } +} + +var ( + nlBytes = []byte{'\n'} + gtBytes = []byte{'>'} + spaceBytes = []byte{' '} +) + +var ( + brTag = []byte("
          ") + brXHTMLTag = []byte("
          ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
          ")
          +	preCloseTag        = []byte("
          ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

          ") + pCloseTag = []byte("

          ") + blockquoteTag = []byte("
          ") + blockquoteCloseTag = []byte("
          ") + hrTag = []byte("
          ") + hrXHTMLTag = []byte("
          ") + ulTag = []byte("
            ") + ulCloseTag = []byte("
          ") + olTag = []byte("
            ") + olCloseTag = []byte("
          ") + dlTag = []byte("
          ") + dlCloseTag = []byte("
          ") + liTag = []byte("
        • ") + liCloseTag = []byte("
        • ") + ddTag = []byte("
          ") + ddCloseTag = []byte("
          ") + dtTag = []byte("
          ") + dtCloseTag = []byte("
          ") + tableTag = []byte("") + tableCloseTag = []byte("
          ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
          \n\n") + footnotesCloseDivBytes = []byte("\n
          \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. 
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if 
r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && 
node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case 
TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
        • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
        • \n

        ") + } + buf.WriteString("\n\n
      • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/inline.go b/vendor/gopkg.in/russross/blackfriday.v2/inline.go new file mode 100644 index 00000000000..4ed2907921e --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. 
+ // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
        +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote 
+ case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case data[i-1] == '\\': + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' || data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + 
titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 
16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) + } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + 
return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. 
Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. +func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip 
punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. + * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), 
[]byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ 
+ } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + 
// skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/markdown.go b/vendor/gopkg.in/russross/blackfriday.v2/markdown.go new file mode 100644 index 00000000000..58d2e4538c6 --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. 
+// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. 
For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. 
+ notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. + Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. 
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. 
+func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. +// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. 
They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. 
This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. +// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. 
+// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//	<p><a href="/url/" title="tooltip title">link</a></p>
        +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. 
It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? (space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if 
data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. 
+func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. +// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). 
+func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/node.go b/vendor/gopkg.in/russross/blackfriday.v2/node.go new file mode 100644 index 00000000000..51b9e8c1b53 --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/node.go @@ -0,0 +1,354 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. 
+const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip

        s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. 
+type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. +func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. 
+func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +func (n *Node) isContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. 
+type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. +func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.isContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/gopkg.in/russross/blackfriday.v2/smartypants.go b/vendor/gopkg.in/russross/blackfriday.v2/smartypants.go new file mode 100644 index 00000000000..3a220e94247 --- /dev/null +++ b/vendor/gopkg.in/russross/blackfriday.v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// 
Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" ] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] 
+ *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. + if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) 
{ + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) { + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 
3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if 
text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == '1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. 
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. 
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/webhook/webhook.go b/webhook/webhook.go index c29ca4b21ae..000543a8798 100644 --- a/webhook/webhook.go +++ b/webhook/webhook.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Skaffold Authors +Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.